agno 2.3.7__py3-none-any.whl → 2.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/models/base.py CHANGED
@@ -5,7 +5,7 @@ from abc import ABC, abstractmethod
  from dataclasses import dataclass, field
  from hashlib import md5
  from pathlib import Path
- from time import time
+ from time import sleep, time
  from types import AsyncGeneratorType, GeneratorType
  from typing import (
      Any,
@@ -24,7 +24,7 @@ from uuid import uuid4

  from pydantic import BaseModel

- from agno.exceptions import AgentRunException
+ from agno.exceptions import AgentRunException, ModelProviderError
  from agno.media import Audio, File, Image, Video
  from agno.models.message import Citations, Message
  from agno.models.metrics import Metrics
@@ -146,10 +146,133 @@ class Model(ABC):
      cache_ttl: Optional[int] = None
      cache_dir: Optional[str] = None

+     # Retry configuration for model provider errors
+     # Number of retries to attempt when a ModelProviderError occurs
+     retries: int = 0
+     # Delay between retries (in seconds)
+     delay_between_retries: int = 1
+     # Exponential backoff: if True, the delay between retries is doubled each time
+     exponential_backoff: bool = False
+
      def __post_init__(self):
          if self.provider is None and self.name is not None:
              self.provider = f"{self.name} ({self.id})"

+     def _get_retry_delay(self, attempt: int) -> float:
+         """Calculate the delay before the next retry attempt."""
+         if self.exponential_backoff:
+             return self.delay_between_retries * (2**attempt)
+         return self.delay_between_retries
+
+     def _invoke_with_retry(self, **kwargs) -> ModelResponse:
+         """
+         Invoke the model with retry logic for ModelProviderError.
+
+         This method wraps the invoke() call and retries on ModelProviderError
+         with optional exponential backoff.
+         """
+         last_exception: Optional[ModelProviderError] = None
+
+         for attempt in range(self.retries + 1):
+             try:
+                 return self.invoke(**kwargs)
+             except ModelProviderError as e:
+                 last_exception = e
+                 if attempt < self.retries:
+                     delay = self._get_retry_delay(attempt)
+                     log_warning(
+                         f"Model provider error (attempt {attempt + 1}/{self.retries + 1}): {e}. Retrying in {delay}s..."
+                     )
+                     sleep(delay)
+                 else:
+                     log_error(f"Model provider error after {self.retries + 1} attempts: {e}")
+
+         # If we've exhausted all retries, raise the last exception
+         raise last_exception  # type: ignore
+
+     async def _ainvoke_with_retry(self, **kwargs) -> ModelResponse:
+         """
+         Asynchronously invoke the model with retry logic for ModelProviderError.
+
+         This method wraps the ainvoke() call and retries on ModelProviderError
+         with optional exponential backoff.
+         """
+         last_exception: Optional[ModelProviderError] = None
+
+         for attempt in range(self.retries + 1):
+             try:
+                 return await self.ainvoke(**kwargs)
+             except ModelProviderError as e:
+                 last_exception = e
+                 if attempt < self.retries:
+                     delay = self._get_retry_delay(attempt)
+                     log_warning(
+                         f"Model provider error (attempt {attempt + 1}/{self.retries + 1}): {e}. Retrying in {delay}s..."
+                     )
+                     await asyncio.sleep(delay)
+                 else:
+                     log_error(f"Model provider error after {self.retries + 1} attempts: {e}")
+
+         # If we've exhausted all retries, raise the last exception
+         raise last_exception  # type: ignore
+
+     def _invoke_stream_with_retry(self, **kwargs) -> Iterator[ModelResponse]:
+         """
+         Invoke the model stream with retry logic for ModelProviderError.
+
+         This method wraps the invoke_stream() call and retries on ModelProviderError
+         with optional exponential backoff. Note that retries restart the entire stream.
+         """
+         last_exception: Optional[ModelProviderError] = None
+
+         for attempt in range(self.retries + 1):
+             try:
+                 yield from self.invoke_stream(**kwargs)
+                 return  # Success, exit the retry loop
+             except ModelProviderError as e:
+                 last_exception = e
+                 if attempt < self.retries:
+                     delay = self._get_retry_delay(attempt)
+                     log_warning(
+                         f"Model provider error during stream (attempt {attempt + 1}/{self.retries + 1}): {e}. "
+                         f"Retrying in {delay}s..."
+                     )
+                     sleep(delay)
+                 else:
+                     log_error(f"Model provider error after {self.retries + 1} attempts: {e}")
+
+         # If we've exhausted all retries, raise the last exception
+         raise last_exception  # type: ignore
+
+     async def _ainvoke_stream_with_retry(self, **kwargs) -> AsyncIterator[ModelResponse]:
+         """
+         Asynchronously invoke the model stream with retry logic for ModelProviderError.
+
+         This method wraps the ainvoke_stream() call and retries on ModelProviderError
+         with optional exponential backoff. Note that retries restart the entire stream.
+         """
+         last_exception: Optional[ModelProviderError] = None
+
+         for attempt in range(self.retries + 1):
+             try:
+                 async for response in self.ainvoke_stream(**kwargs):
+                     yield response
+                 return  # Success, exit the retry loop
+             except ModelProviderError as e:
+                 last_exception = e
+                 if attempt < self.retries:
+                     delay = self._get_retry_delay(attempt)
+                     log_warning(
+                         f"Model provider error during stream (attempt {attempt + 1}/{self.retries + 1}): {e}. "
+                         f"Retrying in {delay}s..."
+                     )
+                     await asyncio.sleep(delay)
+                 else:
+                     log_error(f"Model provider error after {self.retries + 1} attempts: {e}")
+
+         # If we've exhausted all retries, raise the last exception
+         raise last_exception  # type: ignore
+
      def to_dict(self) -> Dict[str, Any]:
          fields = {"name", "id", "provider"}
          _dict = {field: getattr(self, field) for field in fields if getattr(self, field) is not None}
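Taken together, the new fields give `_get_retry_delay` a schedule of `delay_between_retries * 2**attempt` seconds when `exponential_backoff` is enabled, and a flat `delay_between_retries` otherwise. A minimal configuration sketch, assuming the `OpenAIChat` model class from `agno.models.openai` (any `Model` subclass inherits the same fields; the class name is not part of this diff):

```python
# Sketch: configuring the Model-level retry knobs introduced in 2.3.8.
from agno.models.openai import OpenAIChat  # assumed import, not shown in this diff

model = OpenAIChat(
    id="gpt-4o",
    retries=3,                 # retry ModelProviderError up to 3 times (4 attempts total)
    delay_between_retries=1,   # base delay in seconds
    exponential_backoff=True,  # per-attempt delays: 1s, 2s, 4s
)
```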
@@ -734,8 +857,8 @@ class Model(ABC):
          Returns:
              Tuple[Message, bool]: (assistant_message, should_continue)
          """
-         # Generate response
-         provider_response = self.invoke(
+         # Generate response with retry logic for ModelProviderError
+         provider_response = self._invoke_with_retry(
              assistant_message=assistant_message,
              messages=messages,
              response_format=response_format,
@@ -791,8 +914,8 @@ class Model(ABC):
          Returns:
              Tuple[Message, bool]: (assistant_message, should_continue)
          """
-         # Generate response
-         provider_response = await self.ainvoke(
+         # Generate response with retry logic for ModelProviderError
+         provider_response = await self._ainvoke_with_retry(
              messages=messages,
              response_format=response_format,
              tools=tools,
@@ -913,10 +1036,10 @@ class Model(ABC):
          compress_tool_results: bool = False,
      ) -> Iterator[ModelResponse]:
          """
-         Process a streaming response from the model.
+         Process a streaming response from the model with retry logic for ModelProviderError.
          """

-         for response_delta in self.invoke_stream(
+         for response_delta in self._invoke_stream_with_retry(
              messages=messages,
              assistant_message=assistant_message,
              response_format=response_format,
@@ -1132,9 +1255,9 @@ class Model(ABC):
          compress_tool_results: bool = False,
      ) -> AsyncIterator[ModelResponse]:
          """
-         Process a streaming response from the model.
+         Process a streaming response from the model with retry logic for ModelProviderError.
          """
-         async for response_delta in self.ainvoke_stream(
+         async for response_delta in self._ainvoke_stream_with_retry(
              messages=messages,
              assistant_message=assistant_message,
              response_format=response_format,
@@ -1142,7 +1265,7 @@ class Model(ABC):
              tool_choice=tool_choice or self._tool_choice,
              run_response=run_response,
              compress_tool_results=compress_tool_results,
-         ):  # type: ignore
+         ):
              for model_response_delta in self._populate_stream_data(
                  stream_data=stream_data,
                  model_response_delta=response_delta,
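Note the caveat repeated in the stream docstrings above: a retry restarts the entire stream, so deltas already yielded before a mid-stream failure are emitted again on the next attempt. A consumer-side sketch of that implication (calling the internal method directly, which ordinary callers would not do):

```python
# Sketch: deltas from a failed attempt are not rolled back on retry.
messages = []  # placeholder; real callers pass agno Message objects
chunks = []
for delta in model._invoke_stream_with_retry(messages=messages):  # internal API
    chunks.append(delta)
# If attempt 1 failed after 10 deltas, `chunks` holds those 10 deltas
# followed by the complete output of the attempt that succeeded.
```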
@@ -307,6 +307,8 @@ class OpenAIResponses(Model):

      def _upload_file(self, file: File) -> Optional[str]:
          """Upload a file to the OpenAI vector database."""
+         from pathlib import Path
+         from urllib.parse import urlparse

          if file.url is not None:
              file_content_tuple = file.file_url_content
@@ -314,13 +316,12 @@ class OpenAIResponses(Model):
                  file_content = file_content_tuple[0]
              else:
                  return None
-             file_name = file.url.split("/")[-1]
+             file_name = Path(urlparse(file.url).path).name or "file"
              file_tuple = (file_name, file_content)
              result = self.get_client().files.create(file=file_tuple, purpose="assistants")
              return result.id
          elif file.filepath is not None:
              import mimetypes
-             from pathlib import Path

              file_path = file.filepath if isinstance(file.filepath, Path) else Path(file.filepath)
              if file_path.exists() and file_path.is_file():
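The `urlparse`-based extraction matters for URLs that carry query strings or fragments, which the old `split("/")[-1]` kept in the filename. A standalone stdlib sketch of the difference:

```python
# Sketch: old vs. new filename extraction for uploaded file URLs.
from pathlib import Path
from urllib.parse import urlparse

url = "https://example.com/docs/report.pdf?token=abc#page=2"

old_name = url.split("/")[-1]                       # 'report.pdf?token=abc#page=2'
new_name = Path(urlparse(url).path).name or "file"  # 'report.pdf'

# The `or "file"` fallback covers path-less URLs such as "https://example.com/",
# where Path("/").name is the empty string.
assert new_name == "report.pdf"
```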
@@ -110,7 +110,7 @@ async def map_a2a_request_to_run_input(request_body: dict, stream: bool = True)

      Returns:
          RunInput: The Agno RunInput
-         stream: Wheter we are in stream mode
+         stream: Whether we are in stream mode
      """

      # 1. Validate the request
agno/os/middleware/jwt.py CHANGED
@@ -188,18 +188,20 @@ class JWTMiddleware(BaseHTTPMiddleware):

          # Extract dependency claims
          dependencies = {}
-         for claim in self.dependencies_claims:
-             if claim in payload:
-                 dependencies[claim] = payload[claim]
+         if self.dependencies_claims:
+             for claim in self.dependencies_claims:
+                 if claim in payload:
+                     dependencies[claim] = payload[claim]

          if dependencies:
              request.state.dependencies = dependencies

          # Extract session state claims
          session_state = {}
-         for claim in self.session_state_claims:
-             if claim in payload:
-                 session_state[claim] = payload[claim]
+         if self.session_state_claims:
+             for claim in self.session_state_claims:
+                 if claim in payload:
+                     session_state[claim] = payload[claim]

          if session_state:
              request.state.session_state = session_state
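The new guards matter because both claim lists are optional middleware configuration; iterating over an unset (`None`) list raised a `TypeError` at request time. A minimal reproduction of the failure mode the fix prevents (claim names are illustrative):

```python
# Sketch: why `if self.dependencies_claims:` is needed before the loop.
payload = {"sub": "user-123", "org": "acme"}
dependencies_claims = None  # middleware configured without dependency claims

dependencies = {}
if dependencies_claims:  # new guard: skips None (and the empty list)
    for claim in dependencies_claims:
        if claim in payload:
            dependencies[claim] = payload[claim]

# Without the guard: `for claim in None` -> TypeError: 'NoneType' object is not iterable
```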
agno/team/team.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
3
3
  import asyncio
4
4
  import contextlib
5
5
  import json
6
+ import time
6
7
  import warnings
7
8
  from collections import ChainMap, deque
8
9
  from copy import copy
@@ -36,7 +37,6 @@ from agno.compression.manager import CompressionManager
36
37
  from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
37
38
  from agno.exceptions import (
38
39
  InputCheckError,
39
- ModelProviderError,
40
40
  OutputCheckError,
41
41
  RunCancelledException,
42
42
  )
@@ -115,7 +115,6 @@ from agno.utils.events import (
115
115
  create_team_run_cancelled_event,
116
116
  create_team_run_completed_event,
117
117
  create_team_run_content_completed_event,
118
- create_team_run_error_event,
119
118
  create_team_run_output_content_event,
120
119
  create_team_run_started_event,
121
120
  create_team_session_summary_completed_event,
@@ -1554,7 +1553,12 @@ class Team:

          # 4. Start memory creation in background thread
          memory_future = None
-         if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+         if (
+             run_messages.user_message is not None
+             and self.memory_manager is not None
+             and self.enable_user_memories
+             and not self.enable_agentic_memory
+         ):
              log_debug("Starting memory creation in background thread.")
              memory_future = self.background_executor.submit(
                  self._make_memories, run_messages=run_messages, user_id=user_id
@@ -1761,7 +1765,12 @@ class Team:

          # 4. Start memory creation in background thread
          memory_future = None
-         if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+         if (
+             run_messages.user_message is not None
+             and self.memory_manager is not None
+             and self.enable_user_memories
+             and not self.enable_agentic_memory
+         ):
              log_debug("Starting memory creation in background thread.")
              memory_future = self.background_executor.submit(
                  self._make_memories, run_messages=run_messages, user_id=user_id
@@ -1963,7 +1972,6 @@ class Team:
          session_id: Optional[str] = None,
          session_state: Optional[Dict[str, Any]] = None,
          user_id: Optional[str] = None,
-         retries: Optional[int] = None,
          audio: Optional[Sequence[Audio]] = None,
          images: Optional[Sequence[Image]] = None,
          videos: Optional[Sequence[Video]] = None,
@@ -1991,7 +1999,6 @@ class Team:
          session_state: Optional[Dict[str, Any]] = None,
          run_context: Optional[RunContext] = None,
          user_id: Optional[str] = None,
-         retries: Optional[int] = None,
          audio: Optional[Sequence[Audio]] = None,
          images: Optional[Sequence[Image]] = None,
          videos: Optional[Sequence[Video]] = None,
@@ -2020,7 +2027,6 @@ class Team:
          session_state: Optional[Dict[str, Any]] = None,
          run_context: Optional[RunContext] = None,
          user_id: Optional[str] = None,
-         retries: Optional[int] = None,
          audio: Optional[Sequence[Audio]] = None,
          images: Optional[Sequence[Image]] = None,
          videos: Optional[Sequence[Video]] = None,
@@ -2059,6 +2065,7 @@ class Team:
                  DeprecationWarning,
                  stacklevel=2,
              )
+             yield_run_output = yield_run_output or yield_run_response  # For backwards compatibility

          background_tasks = kwargs.pop("background_tasks", None)
          if background_tasks is not None:
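The added line makes the deprecated flag behave like its replacement. A migration sketch (assuming both flags are accepted by `Team.run`, where the deprecation warning is raised):

```python
# Sketch: both spellings now yield the run output; the old one warns.
team.run("Summarize the report", yield_run_response=True)  # deprecated, still honoured
team.run("Summarize the report", yield_run_output=True)    # preferred name
```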
@@ -2196,18 +2203,11 @@ class Team:
          run_response.metrics = Metrics()
          run_response.metrics.start_timer()

-         # If no retries are set, use the team's default retries
-         retries = retries if retries is not None else self.retries
-
-         # Run the team
-         last_exception = None
-         num_attempts = retries + 1
-
-         yield_run_output = bool(yield_run_output or yield_run_response)  # For backwards compatibility
+         # Set up retry logic
+         num_attempts = self.retries + 1

          for attempt in range(num_attempts):
-             # Initialize the current run
-
+             log_debug(f"Retrying Team run {run_id}. Attempt {attempt + 1} of {num_attempts}...")
              # Run the team
              try:
                  if stream:
@@ -2246,18 +2246,6 @@ class Team:
              except (InputCheckError, OutputCheckError) as e:
                  log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                  raise e
-             except ModelProviderError as e:
-                 import time
-
-                 log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
-
-                 last_exception = e
-                 if attempt < num_attempts - 1:  # Don't sleep on the last attempt
-                     if self.exponential_backoff:
-                         delay = 2**attempt * self.delay_between_retries
-                     else:
-                         delay = self.delay_between_retries
-                     time.sleep(delay)
              except KeyboardInterrupt:
                  run_response.content = "Operation cancelled by user"
                  run_response.status = RunStatus.cancelled
@@ -2270,21 +2258,24 @@ class Team:
                      )
                  else:
                      return run_response
+             except Exception as e:
+                 # Check if this is the last attempt
+                 if attempt < num_attempts - 1:
+                     # Calculate delay with exponential backoff if enabled
+                     if self.exponential_backoff:
+                         delay = self.delay_between_retries * (2**attempt)
+                     else:
+                         delay = self.delay_between_retries

-         # If we get here, all retries failed
-         if last_exception is not None:
-             log_error(
-                 f"Failed after {num_attempts} attempts. Last error using {last_exception.model_name}({last_exception.model_id})"
-             )
-             if stream:
-                 return generator_wrapper(create_team_run_error_event(run_response, error=str(last_exception)))
-
-             raise last_exception
-         else:
-             if stream:
-                 return generator_wrapper(create_team_run_error_event(run_response, error=str(last_exception)))
+                     log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
+                     time.sleep(delay)
+                 else:
+                     # Final attempt failed - re-raise the exception
+                     log_error(f"All {num_attempts} attempts failed. Final error: {str(e)}")
+                     raise e

-         raise Exception(f"Failed after {num_attempts} attempts.")
+         # If we get here, all retries failed
+         raise Exception(f"Failed after {num_attempts} attempts.")

      async def _arun(
          self,
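With the `retries` keyword removed from the run signatures, retry behaviour is configured on the `Team` itself, and the new `except Exception` handler retries any failure rather than only `ModelProviderError`. A configuration sketch (member agents elided):

```python
# Sketch: Team-level retry configuration after 2.3.8.
from agno.team import Team

team = Team(
    members=[member_agent],    # hypothetical member agent, defined elsewhere
    retries=2,                 # 3 attempts total
    delay_between_retries=1,   # base delay in seconds
    exponential_backoff=True,  # waits 1s, then 2s, between attempts
)
team.run("Plan the launch")    # the per-call retries= argument is gone
```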
@@ -2411,7 +2402,12 @@ class Team:

          # 6. Start memory creation in background task
          memory_task = None
-         if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+         if (
+             run_messages.user_message is not None
+             and self.memory_manager is not None
+             and self.enable_user_memories
+             and not self.enable_agentic_memory
+         ):
              log_debug("Starting memory creation in background task.")
              memory_task = asyncio.create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))

@@ -2649,7 +2645,12 @@ class Team:

          # 7. Start memory creation in background task
          memory_task = None
-         if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+         if (
+             run_messages.user_message is not None
+             and self.memory_manager is not None
+             and self.enable_user_memories
+             and not self.enable_agentic_memory
+         ):
              log_debug("Starting memory creation in background task.")
              memory_task = asyncio.create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))

@@ -2867,7 +2868,6 @@ class Team:
          session_state: Optional[Dict[str, Any]] = None,
          run_context: Optional[RunContext] = None,
          user_id: Optional[str] = None,
-         retries: Optional[int] = None,
          audio: Optional[Sequence[Audio]] = None,
          images: Optional[Sequence[Image]] = None,
          videos: Optional[Sequence[Video]] = None,
@@ -2895,7 +2895,6 @@ class Team:
          session_state: Optional[Dict[str, Any]] = None,
          run_context: Optional[RunContext] = None,
          user_id: Optional[str] = None,
-         retries: Optional[int] = None,
          audio: Optional[Sequence[Audio]] = None,
          images: Optional[Sequence[Image]] = None,
          videos: Optional[Sequence[Video]] = None,
@@ -2924,7 +2923,6 @@ class Team:
          session_state: Optional[Dict[str, Any]] = None,
          run_context: Optional[RunContext] = None,
          user_id: Optional[str] = None,
-         retries: Optional[int] = None,
          audio: Optional[Sequence[Audio]] = None,
          images: Optional[Sequence[Image]] = None,
          videos: Optional[Sequence[Video]] = None,
@@ -2959,6 +2957,8 @@ class Team:
              stacklevel=2,
          )

+         yield_run_output = yield_run_output or yield_run_response  # For backwards compatibility
+
          background_tasks = kwargs.pop("background_tasks", None)
          if background_tasks is not None:
              from fastapi import BackgroundTasks
@@ -3084,20 +3084,18 @@ class Team:
          run_response.metrics = Metrics()
          run_response.metrics.start_timer()

-         # If no retries are set, use the team's default retries
-         retries = retries if retries is not None else self.retries
-
-         # Run the team
-         last_exception = None
-         num_attempts = retries + 1
-
          yield_run_output = bool(yield_run_output or yield_run_response)  # For backwards compatibility

+         # Resolve retry parameters
+         num_attempts = self.retries + 1
+
          for attempt in range(num_attempts):
+             log_debug(f"Retrying Team run {run_id}. Attempt {attempt + 1} of {num_attempts}...")
+
              # Run the team
              try:
                  if stream:
-                     response_iterator = self._arun_stream(
+                     return self._arun_stream(  # type: ignore
                          input=validated_input,
                          run_response=run_response,
                          run_context=run_context,
@@ -3113,7 +3111,6 @@ class Team:
                          background_tasks=background_tasks,
                          **kwargs,
                      )
-                     return response_iterator  # type: ignore
                  else:
                      return self._arun(  # type: ignore
                          input=validated_input,
@@ -3133,17 +3130,6 @@ class Team:
              except (InputCheckError, OutputCheckError) as e:
                  log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                  raise e
-             except ModelProviderError as e:
-                 log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
-                 last_exception = e
-                 if attempt < num_attempts - 1:  # Don't sleep on the last attempt
-                     if self.exponential_backoff:
-                         delay = 2**attempt * self.delay_between_retries
-                     else:
-                         delay = self.delay_between_retries
-                     import time
-
-                     time.sleep(delay)
              except KeyboardInterrupt:
                  run_response.content = "Operation cancelled by user"
                  run_response.status = RunStatus.cancelled
@@ -3156,21 +3142,25 @@ class Team:
                      )
                  else:
                      return run_response
+             except Exception as e:
+                 # Check if this is the last attempt
+                 if attempt < num_attempts - 1:
+                     # Calculate delay with exponential backoff if enabled
+                     if self.exponential_backoff:
+                         delay = self.delay_between_retries * (2**attempt)
+                     else:
+                         delay = self.delay_between_retries

-         # If we get here, all retries failed
-         if last_exception is not None:
-             log_error(
-                 f"Failed after {num_attempts} attempts. Last error using {last_exception.model_name}({last_exception.model_id})"
-             )
-             if stream:
-                 return async_generator_wrapper(create_team_run_error_event(run_response, error=str(last_exception)))
-
-             raise last_exception
-         else:
-             if stream:
-                 return async_generator_wrapper(create_team_run_error_event(run_response, error=str(last_exception)))
+                     log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
+                     time.sleep(delay)
+                     continue
+                 else:
+                     # Final attempt failed - re-raise the exception
+                     log_error(f"All {num_attempts} attempts failed. Final error: {str(e)}")
+                     raise e

-         raise Exception(f"Failed after {num_attempts} attempts.")
+         # If we get here, all retries failed
+         raise Exception(f"Failed after {num_attempts} attempts.")

      def _update_run_response(
          self,
@@ -3821,7 +3811,12 @@ class Team:
          user_message_str = (
              run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
          )
-         if user_message_str is not None and user_message_str.strip() != "" and self.memory_manager is not None:
+         if (
+             user_message_str is not None
+             and user_message_str.strip() != ""
+             and self.memory_manager is not None
+             and self.enable_user_memories
+         ):
              log_debug("Managing user memories")
              self.memory_manager.create_user_memories(
                  message=user_message_str,
@@ -3837,7 +3832,12 @@ class Team:
          user_message_str = (
              run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
          )
-         if user_message_str is not None and user_message_str.strip() != "" and self.memory_manager is not None:
+         if (
+             user_message_str is not None
+             and user_message_str.strip() != ""
+             and self.memory_manager is not None
+             and self.enable_user_memories
+         ):
              log_debug("Managing user memories")
              await self.memory_manager.acreate_user_memories(
                  message=user_message_str,
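In all four places, memory creation now also requires `enable_user_memories`; a configured `memory_manager` alone no longer triggers it. A configuration sketch (the `MemoryManager` import path is assumed, not shown in this diff):

```python
# Sketch: opting back in to user-memory creation after 2.3.8.
from agno.memory import MemoryManager  # assumed import path
from agno.team import Team

team = Team(
    members=[member_agent],          # hypothetical member agent
    memory_manager=MemoryManager(),
    enable_user_memories=True,       # now required for memories to be created
)
```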
@@ -8897,12 +8897,13 @@ class Team:
              Optional[List[UserMemory]]: The user memories.
          """
          if self.memory_manager is None:
-             return None
+             self._set_memory_manager()
+
          user_id = user_id if user_id is not None else self.user_id
          if user_id is None:
              user_id = "default"

-         return self.memory_manager.get_user_memories(user_id=user_id)
+         return self.memory_manager.get_user_memories(user_id=user_id)  # type: ignore

      async def aget_user_memories(self, user_id: Optional[str] = None) -> Optional[List[UserMemory]]:
          """Get the user memories for the given user ID.
@@ -8913,12 +8914,13 @@ class Team:
              Optional[List[UserMemory]]: The user memories.
          """
          if self.memory_manager is None:
-             return None
+             self._set_memory_manager()
+
          user_id = user_id if user_id is not None else self.user_id
          if user_id is None:
              user_id = "default"

-         return await self.memory_manager.aget_user_memories(user_id=user_id)
+         return await self.memory_manager.aget_user_memories(user_id=user_id)  # type: ignore

      ###########################################################################
      # Handle reasoning content
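Both getters previously returned `None` when no `memory_manager` was set; they now lazily create one via `_set_memory_manager()` and proceed. A usage sketch:

```python
# Sketch: reading memories no longer requires pre-configuring a manager.
memories = team.get_user_memories(user_id="user-123")  # manager auto-created if absent
for memory in memories or []:
    print(memory)
```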
agno/tools/workflow.py CHANGED
@@ -130,9 +130,13 @@ class WorkflowTools(Toolkit):
      ) -> str:
          """Use this tool to execute the workflow with the specified inputs and parameters.
          After thinking through the requirements, use this tool to run the workflow with appropriate inputs.
+
          Args:
-             input_data: The input data for the workflow.
+             input: The input data for the workflow.
          """
+         if isinstance(input, dict):
+             input = RunWorkflowInput.model_validate(input)
+
          try:
              log_debug(f"Running workflow with input: {input.input_data}")

@@ -170,6 +174,9 @@ class WorkflowTools(Toolkit):
              input_data: The input data for the workflow (use a `str` for a simple input)
              additional_data: The additional data for the workflow. This is a dictionary of key-value pairs that will be passed to the workflow. E.g. {"topic": "food", "style": "Humour"}
          """
+         if isinstance(input, dict):
+             input = RunWorkflowInput.model_validate(input)
+
          try:
              log_debug(f"Running workflow with input: {input.input_data}")
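The duplicated guard lets callers (including tool-calling models that emit JSON arguments) pass a plain dict where a `RunWorkflowInput` model is expected, relying on pydantic's `model_validate` for coercion. A sketch using the field names from the docstrings above:

```python
# Sketch: dict input is coerced into the RunWorkflowInput model.
raw = {"input_data": "Write a post about food", "additional_data": {"style": "Humour"}}
if isinstance(raw, dict):
    raw = RunWorkflowInput.model_validate(raw)  # raises ValidationError on bad shapes
print(raw.input_data)  # "Write a post about food"
```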