agno 2.3.4__py3-none-any.whl → 2.3.5__py3-none-any.whl

This diff compares the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (112)
  1. agno/agent/agent.py +177 -41
  2. agno/culture/manager.py +2 -2
  3. agno/db/base.py +330 -8
  4. agno/db/dynamo/dynamo.py +722 -2
  5. agno/db/dynamo/schemas.py +127 -0
  6. agno/db/firestore/firestore.py +573 -1
  7. agno/db/firestore/schemas.py +40 -0
  8. agno/db/gcs_json/gcs_json_db.py +446 -1
  9. agno/db/in_memory/in_memory_db.py +143 -1
  10. agno/db/json/json_db.py +438 -1
  11. agno/db/mongo/async_mongo.py +522 -0
  12. agno/db/mongo/mongo.py +523 -1
  13. agno/db/mongo/schemas.py +29 -0
  14. agno/db/mysql/mysql.py +536 -3
  15. agno/db/mysql/schemas.py +38 -0
  16. agno/db/postgres/async_postgres.py +541 -13
  17. agno/db/postgres/postgres.py +535 -2
  18. agno/db/postgres/schemas.py +38 -0
  19. agno/db/redis/redis.py +468 -1
  20. agno/db/redis/schemas.py +32 -0
  21. agno/db/singlestore/schemas.py +38 -0
  22. agno/db/singlestore/singlestore.py +523 -1
  23. agno/db/sqlite/async_sqlite.py +548 -9
  24. agno/db/sqlite/schemas.py +38 -0
  25. agno/db/sqlite/sqlite.py +537 -5
  26. agno/db/sqlite/utils.py +6 -8
  27. agno/db/surrealdb/models.py +25 -0
  28. agno/db/surrealdb/surrealdb.py +548 -1
  29. agno/eval/accuracy.py +10 -4
  30. agno/eval/performance.py +10 -4
  31. agno/eval/reliability.py +22 -13
  32. agno/exceptions.py +11 -0
  33. agno/hooks/__init__.py +3 -0
  34. agno/hooks/decorator.py +164 -0
  35. agno/knowledge/chunking/semantic.py +2 -2
  36. agno/models/aimlapi/aimlapi.py +2 -3
  37. agno/models/anthropic/claude.py +18 -13
  38. agno/models/aws/bedrock.py +3 -4
  39. agno/models/aws/claude.py +5 -1
  40. agno/models/azure/ai_foundry.py +2 -2
  41. agno/models/azure/openai_chat.py +8 -0
  42. agno/models/cerebras/cerebras.py +63 -11
  43. agno/models/cerebras/cerebras_openai.py +2 -3
  44. agno/models/cohere/chat.py +1 -5
  45. agno/models/cometapi/cometapi.py +2 -3
  46. agno/models/dashscope/dashscope.py +2 -3
  47. agno/models/deepinfra/deepinfra.py +2 -3
  48. agno/models/deepseek/deepseek.py +2 -3
  49. agno/models/fireworks/fireworks.py +2 -3
  50. agno/models/google/gemini.py +9 -7
  51. agno/models/groq/groq.py +2 -3
  52. agno/models/huggingface/huggingface.py +1 -5
  53. agno/models/ibm/watsonx.py +1 -5
  54. agno/models/internlm/internlm.py +2 -3
  55. agno/models/langdb/langdb.py +6 -4
  56. agno/models/litellm/chat.py +2 -2
  57. agno/models/litellm/litellm_openai.py +2 -3
  58. agno/models/meta/llama.py +1 -5
  59. agno/models/meta/llama_openai.py +4 -5
  60. agno/models/mistral/mistral.py +1 -5
  61. agno/models/nebius/nebius.py +2 -3
  62. agno/models/nvidia/nvidia.py +4 -5
  63. agno/models/openai/chat.py +14 -3
  64. agno/models/openai/responses.py +14 -3
  65. agno/models/openrouter/openrouter.py +4 -5
  66. agno/models/perplexity/perplexity.py +2 -3
  67. agno/models/portkey/portkey.py +7 -6
  68. agno/models/requesty/requesty.py +4 -5
  69. agno/models/response.py +2 -1
  70. agno/models/sambanova/sambanova.py +4 -5
  71. agno/models/siliconflow/siliconflow.py +3 -4
  72. agno/models/together/together.py +4 -5
  73. agno/models/vercel/v0.py +4 -5
  74. agno/models/vllm/vllm.py +19 -14
  75. agno/models/xai/xai.py +4 -5
  76. agno/os/app.py +104 -0
  77. agno/os/config.py +13 -0
  78. agno/os/interfaces/whatsapp/router.py +0 -1
  79. agno/os/mcp.py +1 -0
  80. agno/os/router.py +31 -0
  81. agno/os/routers/traces/__init__.py +3 -0
  82. agno/os/routers/traces/schemas.py +414 -0
  83. agno/os/routers/traces/traces.py +499 -0
  84. agno/os/schema.py +10 -1
  85. agno/os/utils.py +57 -0
  86. agno/run/agent.py +1 -0
  87. agno/run/base.py +17 -0
  88. agno/run/team.py +4 -0
  89. agno/session/team.py +1 -0
  90. agno/table.py +10 -0
  91. agno/team/team.py +214 -65
  92. agno/tools/function.py +10 -8
  93. agno/tools/nano_banana.py +1 -1
  94. agno/tracing/__init__.py +12 -0
  95. agno/tracing/exporter.py +157 -0
  96. agno/tracing/schemas.py +276 -0
  97. agno/tracing/setup.py +111 -0
  98. agno/utils/agent.py +4 -4
  99. agno/utils/hooks.py +56 -1
  100. agno/vectordb/qdrant/qdrant.py +22 -22
  101. agno/workflow/condition.py +8 -0
  102. agno/workflow/loop.py +8 -0
  103. agno/workflow/parallel.py +8 -0
  104. agno/workflow/router.py +8 -0
  105. agno/workflow/step.py +20 -0
  106. agno/workflow/steps.py +8 -0
  107. agno/workflow/workflow.py +83 -17
  108. {agno-2.3.4.dist-info → agno-2.3.5.dist-info}/METADATA +2 -2
  109. {agno-2.3.4.dist-info → agno-2.3.5.dist-info}/RECORD +112 -102
  110. {agno-2.3.4.dist-info → agno-2.3.5.dist-info}/WHEEL +0 -0
  111. {agno-2.3.4.dist-info → agno-2.3.5.dist-info}/licenses/LICENSE +0 -0
  112. {agno-2.3.4.dist-info → agno-2.3.5.dist-info}/top_level.txt +0 -0
agno/eval/accuracy.py CHANGED
@@ -612,11 +612,14 @@ Remember: You must only compare the agent_output to the expected_output. The exp
         print_results: bool = True,
     ) -> Optional[AccuracyResult]:
         """Run the evaluation logic against the given answer, instead of generating an answer with the Agent"""
+        # Generate unique run_id for this execution (don't modify self.eval_id due to concurrency)
+        run_id = str(uuid4())
+
         set_log_level_to_debug() if self.debug_mode else set_log_level_to_info()
 
         self.result = AccuracyResult()
 
-        logger.debug(f"************ Evaluation Start: {self.eval_id} ************")
+        logger.debug(f"************ Evaluation Start: {run_id} ************")
 
         evaluator_agent = self.get_evaluator_agent()
         eval_input = self.get_eval_input()
@@ -721,7 +724,7 @@ Remember: You must only compare the agent_output to the expected_output. The exp
             ),
         )
 
-        logger.debug(f"*********** Evaluation End: {self.eval_id} ***********")
+        logger.debug(f"*********** Evaluation End: {run_id} ***********")
         return self.result
 
     async def arun_with_output(
@@ -732,11 +735,14 @@ Remember: You must only compare the agent_output to the expected_output. The exp
         print_results: bool = True,
     ) -> Optional[AccuracyResult]:
         """Run the evaluation logic against the given answer, instead of generating an answer with the Agent"""
+        # Generate unique run_id for this execution (don't modify self.eval_id due to concurrency)
+        run_id = str(uuid4())
+
         set_log_level_to_debug() if self.debug_mode else set_log_level_to_info()
 
         self.result = AccuracyResult()
 
-        logger.debug(f"************ Evaluation Start: {self.eval_id} ************")
+        logger.debug(f"************ Evaluation Start: {run_id} ************")
 
         evaluator_agent = self.get_evaluator_agent()
         eval_input = self.get_eval_input()
@@ -820,7 +826,7 @@ Remember: You must only compare the agent_output to the expected_output. The exp
             eval_input=log_eval_input,
         )
 
-        logger.debug(f"*********** Evaluation End: {self.eval_id} ***********")
+        logger.debug(f"*********** Evaluation End: {run_id} ***********")
         return self.result
 
     def _get_telemetry_data(self) -> Dict[str, Any]:
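Both run_with_output and arun_with_output now generate a fresh run_id per call instead of reusing the shared self.eval_id, so concurrent executions of one eval object no longer interleave under a single identifier. A minimal sketch of the pattern (the Eval class below is illustrative, not the full agno API):

    from uuid import uuid4

    class Eval:
        def __init__(self) -> None:
            # Identity of the eval definition, shared by every run of this object
            self.eval_id = str(uuid4())

        def run(self) -> str:
            # Identity of this one execution; never written back to self,
            # so parallel run() calls cannot clobber each other
            run_id = str(uuid4())
            print(f"Evaluation Start: {run_id}")
            # ... evaluation work ...
            print(f"Evaluation End: {run_id}")
            return run_id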
agno/eval/performance.py CHANGED
@@ -498,13 +498,16 @@ class PerformanceEval:
         from rich.live import Live
         from rich.status import Status
 
+        # Generate unique run_id for this execution (don't modify self.eval_id due to concurrency)
+        run_id = str(uuid4())
+
         run_times = []
         memory_usages = []
         previous_snapshot = None
 
         self._set_log_level()
 
-        log_debug(f"************ Evaluation Start: {self.eval_id} ************")
+        log_debug(f"************ Evaluation Start: {run_id} ************")
 
         # Add a spinner while running the evaluations
         console = Console()
@@ -615,7 +618,7 @@
             ),
         )
 
-        log_debug(f"*********** Evaluation End: {self.eval_id} ***********")
+        log_debug(f"*********** Evaluation End: {run_id} ***********")
         return self.result
 
     async def arun(
@@ -641,13 +644,16 @@
         from rich.live import Live
         from rich.status import Status
 
+        # Generate unique run_id for this execution (don't modify self.eval_id due to concurrency)
+        run_id = str(uuid4())
+
         run_times = []
         memory_usages = []
         previous_snapshot = None
 
         self._set_log_level()
 
-        log_debug(f"************ Evaluation Start: {self.eval_id} ************")
+        log_debug(f"************ Evaluation Start: {run_id} ************")
 
         # Add a spinner while running the evaluations
         console = Console()
@@ -758,7 +764,7 @@
             ),
         )
 
-        log_debug(f"*********** Evaluation End: {self.eval_id} ***********")
+        log_debug(f"*********** Evaluation End: {run_id} ***********")
         return self.result
 
     def _get_telemetry_data(self) -> Dict[str, Any]:
agno/eval/reliability.py CHANGED
@@ -4,6 +4,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 from uuid import uuid4
 
 from agno.db.base import AsyncBaseDb, BaseDb
+from agno.run.team import TeamRunOutput
 
 if TYPE_CHECKING:
     from rich.console import Console
@@ -11,7 +12,6 @@ if TYPE_CHECKING:
 from agno.agent import RunOutput
 from agno.db.schemas.evals import EvalType
 from agno.eval.utils import async_log_eval, log_eval_run, store_result_in_file
-from agno.run.team import TeamRunOutput
 from agno.utils.log import logger
 
 
@@ -86,6 +86,9 @@ class ReliabilityEval:
         from rich.live import Live
         from rich.status import Status
 
+        # Generate unique run_id for this execution (don't modify self.eval_id due to concurrency)
+        run_id = str(uuid4())
+
         # Add a spinner while running the evaluations
         console = Console()
         with Live(console=console, transient=True) as live_log:
@@ -118,7 +121,7 @@
                 if not tool_name:
                     continue
                 else:
-                    if tool_name not in self.expected_tool_calls:  # type: ignore
+                    if self.expected_tool_calls is not None and tool_name not in self.expected_tool_calls:
                         failed_tool_calls.append(tool_call.get("function", {}).get("name"))
                     else:
                         passed_tool_calls.append(tool_call.get("function", {}).get("name"))
@@ -183,7 +186,7 @@
             ),
         )
 
-        logger.debug(f"*********** Evaluation End: {self.eval_id} ***********")
+        logger.debug(f"*********** Evaluation End: {run_id} ***********")
         return self.result
 
     async def arun(self, *, print_results: bool = False) -> Optional[ReliabilityResult]:
@@ -199,6 +202,9 @@
         from rich.live import Live
         from rich.status import Status
 
+        # Generate unique run_id for this execution (don't modify self.eval_id due to concurrency)
+        run_id = str(uuid4())
+
         # Add a spinner while running the evaluations
         console = Console()
         with Live(console=console, transient=True) as live_log:
@@ -223,15 +229,18 @@
 
             failed_tool_calls = []
             passed_tool_calls = []
-            for tool_call in actual_tool_calls:  # type: ignore
-                tool_name = tool_call.get("function", {}).get("name")
-                if not tool_name:
-                    continue
-                else:
-                    if tool_name not in self.expected_tool_calls:  # type: ignore
-                        failed_tool_calls.append(tool_call.get("function", {}).get("name"))
+            if not actual_tool_calls:
+                failed_tool_calls = self.expected_tool_calls or []
+            else:
+                for tool_call in actual_tool_calls:  # type: ignore
+                    tool_name = tool_call.get("function", {}).get("name")
+                    if not tool_name:
+                        continue
                     else:
-                        passed_tool_calls.append(tool_call.get("function", {}).get("name"))
+                        if self.expected_tool_calls is not None and tool_name not in self.expected_tool_calls:
+                            failed_tool_calls.append(tool_call.get("function", {}).get("name"))
+                        else:
+                            passed_tool_calls.append(tool_call.get("function", {}).get("name"))
 
             self.result = ReliabilityResult(
                 eval_status="PASSED" if len(failed_tool_calls) == 0 else "FAILED",
@@ -244,7 +253,7 @@
             store_result_in_file(
                 file_path=self.file_path_to_save_results,
                 name=self.name,
-                eval_id=self.eval_id,
+                eval_id=run_id,
                 result=self.result,
             )
 
@@ -293,7 +302,7 @@
             ),
         )
 
-        logger.debug(f"*********** Evaluation End: {self.eval_id} ***********")
+        logger.debug(f"*********** Evaluation End: {run_id} ***********")
         return self.result
 
     def _get_telemetry_data(self) -> Dict[str, Any]:
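Two behavior fixes land in arun: the membership test now guards against expected_tool_calls being None, and an empty actual_tool_calls list marks every expected call as failed instead of passing vacuously. A standalone sketch of the resulting classification logic (a simplification of the method body above, not the class itself):

    from typing import Dict, List, Optional, Tuple

    def classify_tool_calls(
        actual_tool_calls: Optional[List[Dict]],
        expected_tool_calls: Optional[List[str]],
    ) -> Tuple[List[str], List[str]]:
        failed: List[str] = []
        passed: List[str] = []
        if not actual_tool_calls:
            # No tools were called: every expected call counts as failed
            return list(expected_tool_calls or []), passed
        for tool_call in actual_tool_calls:
            name = tool_call.get("function", {}).get("name")
            if not name:
                continue
            if expected_tool_calls is not None and name not in expected_tool_calls:
                failed.append(name)
            else:
                passed.append(name)
        return failed, passed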
agno/exceptions.py CHANGED
@@ -77,6 +77,17 @@ class AgnoError(Exception):
         return str(self.message)
 
 
+class ModelAuthenticationError(AgnoError):
+    """Raised when model authentication fails."""
+
+    def __init__(self, message: str, status_code: int = 401, model_name: Optional[str] = None):
+        super().__init__(message, status_code)
+        self.model_name = model_name
+
+        self.type = "model_authentication_error"
+        self.error_id = "model_authentication_error"
+
+
 class ModelProviderError(AgnoError):
     """Exception raised when a model provider returns an error."""
 
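The new subclass narrows AgnoError for missing or invalid credentials, defaulting to status 401 and tagging instances with a stable type and error_id. A hedged sketch of raising and catching it (the key name and model name are placeholders):

    from typing import Optional

    from agno.exceptions import ModelAuthenticationError

    def require_api_key(api_key: Optional[str], model_name: str) -> None:
        if not api_key:
            raise ModelAuthenticationError(
                message="EXAMPLE_API_KEY not set. Please set the EXAMPLE_API_KEY environment variable.",
                model_name=model_name,
            )

    try:
        require_api_key(None, "example-model")
    except ModelAuthenticationError as e:
        # AgnoError stores message and status_code; the subclass adds model_name
        print(e.model_name, e.status_code, e.type)  # example-model 401 model_authentication_error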
agno/hooks/__init__.py ADDED
@@ -0,0 +1,3 @@
+from agno.hooks.decorator import hook, should_run_in_background
+
+__all__ = ["hook", "should_run_in_background"]
agno/hooks/decorator.py ADDED
@@ -0,0 +1,164 @@
+from functools import wraps
+from typing import Any, Callable, TypeVar, Union, overload
+
+# Type variable for better type hints
+F = TypeVar("F", bound=Callable[..., Any])
+
+# Attribute name used to mark hooks for background execution
+HOOK_RUN_IN_BACKGROUND_ATTR = "_agno_run_in_background"
+
+
+def _is_async_function(func: Callable) -> bool:
+    """
+    Check if a function is async, even when wrapped by decorators like @staticmethod.
+    Traverses the full wrapper chain to find the original function.
+    """
+    from inspect import iscoroutinefunction, unwrap
+
+    # First, try the standard inspect function on the wrapper
+    if iscoroutinefunction(func):
+        return True
+
+    # Use unwrap to traverse the full __wrapped__ chain to the original function
+    try:
+        original_func = unwrap(func)
+        if original_func is not func and iscoroutinefunction(original_func):
+            return True
+    except ValueError:
+        # unwrap raises ValueError if it hits a cycle
+        pass
+
+    # Check if the function has CO_COROUTINE flag in its code object
+    try:
+        if hasattr(func, "__code__") and func.__code__.co_flags & 0x80:  # CO_COROUTINE flag
+            return True
+    except (AttributeError, TypeError):
+        pass
+
+    return False
+
+
+@overload
+def hook() -> Callable[[F], F]: ...
+
+
+@overload
+def hook(
+    *,
+    run_in_background: bool = False,
+) -> Callable[[F], F]: ...
+
+
+@overload
+def hook(func: F) -> F: ...
+
+
+def hook(*args, **kwargs) -> Union[F, Callable[[F], F]]:
+    """Decorator to configure hook behavior.
+
+    Args:
+        run_in_background: If True, this hook will be scheduled as a FastAPI background task
+                           when background_tasks is available, regardless of the agent/team's
+                           run_hooks_in_background setting. This allows per-hook control over
+                           background execution. This is only use-able when running with AgentOS.
+
+    Returns:
+        Union[F, Callable[[F], F]]: Decorated function or decorator
+
+    Examples:
+        @hook
+        def my_hook(run_output, agent):
+            # This runs normally (blocking)
+            process_output(run_output.content)
+
+        @hook()
+        def another_hook(run_output, agent):
+            # Same as above - runs normally
+            process_output(run_output.content)
+
+        @hook(run_in_background=True)
+        def my_background_hook(run_output, agent):
+            # This will run in the background when background_tasks is available
+            send_notification(run_output.content)
+
+        @hook(run_in_background=True)
+        async def my_async_background_hook(run_output, agent):
+            # Async hooks also supported
+            await send_async_notification(run_output.content)
+
+        agent = Agent(
+            model=OpenAIChat(id="gpt-4o"),
+            post_hooks=[my_hook, my_background_hook],
+        )
+    """
+    # Valid kwargs for the hook decorator
+    VALID_KWARGS = frozenset({"run_in_background"})
+
+    # Validate kwargs
+    invalid_kwargs = set(kwargs.keys()) - VALID_KWARGS
+    if invalid_kwargs:
+        raise ValueError(
+            f"Invalid hook configuration arguments: {invalid_kwargs}. Valid arguments are: {sorted(VALID_KWARGS)}"
+        )
+
+    def decorator(func: F) -> F:
+        run_in_background = kwargs.get("run_in_background", False)
+
+        # Preserve existing hook attributes from previously applied decorators
+        # Use OR logic: if any decorator sets run_in_background=True, it stays True
+        existing_run_in_background = should_run_in_background(func)
+        final_run_in_background = run_in_background or existing_run_in_background
+
+        @wraps(func)
+        def sync_wrapper(*args: Any, **kwargs: Any) -> Any:
+            return func(*args, **kwargs)
+
+        @wraps(func)
+        async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
+            return await func(*args, **kwargs)
+
+        # Choose appropriate wrapper based on function type
+        if _is_async_function(func):
+            wrapper = async_wrapper
+        else:
+            wrapper = sync_wrapper
+
+        # Set the background execution attribute (combined from all decorators)
+        setattr(wrapper, HOOK_RUN_IN_BACKGROUND_ATTR, final_run_in_background)
+
+        return wrapper  # type: ignore
+
+    # Handle both @hook and @hook() cases
+    if len(args) == 1 and callable(args[0]) and not kwargs:
+        return decorator(args[0])
+
+    return decorator
+
+
+def should_run_in_background(hook_func: Callable) -> bool:
+    """
+    Check if a hook function is marked to run in background.
+    Traverses the wrapper chain to find the attribute when multiple decorators are stacked.
+
+    Args:
+        hook_func: The hook function to check
+
+    Returns:
+        True if the hook is decorated with @hook(run_in_background=True)
+    """
+    # Check the function directly first
+    if hasattr(hook_func, HOOK_RUN_IN_BACKGROUND_ATTR):
+        return getattr(hook_func, HOOK_RUN_IN_BACKGROUND_ATTR)
+
+    # Traverse the wrapper chain to find the attribute
+    current = hook_func
+    seen: set[int] = set()
+    while hasattr(current, "__wrapped__"):
+        if id(current) in seen:
+            break
+        seen.add(id(current))
+        current = current.__wrapped__
+        if hasattr(current, HOOK_RUN_IN_BACKGROUND_ATTR):
+            return getattr(current, HOOK_RUN_IN_BACKGROUND_ATTR)
+
+    return False
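hook() stores a marker attribute on the returned wrapper and should_run_in_background() reads it back, walking the __wrapped__ chain so the marker survives stacked decorators. A small hedged sketch of how a dispatcher might consult it (the dispatch function is illustrative, not agno's actual scheduler):

    from agno.hooks import hook, should_run_in_background

    @hook(run_in_background=True)
    def notify(run_output, agent):
        print("notifying:", run_output.content)

    def dispatch(hook_func, *args, background_tasks=None):
        # Hypothetical dispatcher: schedule marked hooks when a task queue
        # (e.g. FastAPI BackgroundTasks) is available, else call blocking
        if background_tasks is not None and should_run_in_background(hook_func):
            background_tasks.add_task(hook_func, *args)
        else:
            hook_func(*args)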
agno/knowledge/chunking/semantic.py CHANGED
@@ -55,10 +55,10 @@ class SemanticChunking(ChunkingStrategy):
                 # Fallback to model id
                 params["embedding_model"] = getattr(self.embedder, "id", None) or "text-embedding-3-small"
 
-            self.chunker = SemanticChunker(**params)
+            self.chunker = SemanticChunker(**params)  # type: ignore
         except Exception:
             # As a final fallback, use the original behavior
-            self.chunker = SemanticChunker(
+            self.chunker = SemanticChunker(  # type: ignore
                 embedding_model=getattr(self.embedder, "id", None) or "text-embedding-3-small",
                 chunk_size=self.chunk_size,
                 threshold=self.similarity_threshold,
agno/models/aimlapi/aimlapi.py CHANGED
@@ -2,7 +2,7 @@ from dataclasses import dataclass, field
 from os import getenv
 from typing import Any, Dict, Optional
 
-from agno.exceptions import ModelProviderError
+from agno.exceptions import ModelAuthenticationError
 from agno.models.message import Message
 from agno.models.openai.like import OpenAILike
 
@@ -39,10 +39,9 @@ class AIMLAPI(OpenAILike):
         if not self.api_key:
             self.api_key = getenv("AIMLAPI_API_KEY")
             if not self.api_key:
-                raise ModelProviderError(
+                raise ModelAuthenticationError(
                     message="AIMLAPI_API_KEY not set. Please set the AIMLAPI_API_KEY environment variable.",
                     model_name=self.name,
-                    model_id=self.id,
                 )
         return super()._get_client_params()
 
agno/models/anthropic/claude.py CHANGED
@@ -129,6 +129,7 @@ class Claude(Model):
 
     # Client parameters
     api_key: Optional[str] = None
+    auth_token: Optional[str] = None
     default_headers: Optional[Dict[str, Any]] = None
     timeout: Optional[float] = None
     http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
@@ -153,13 +154,15 @@
         client_params: Dict[str, Any] = {}
 
         self.api_key = self.api_key or getenv("ANTHROPIC_API_KEY")
-        if not self.api_key:
-            raise ModelProviderError(
-                "ANTHROPIC_API_KEY not set. Please set the ANTHROPIC_API_KEY environment variable."
+        self.auth_token = self.auth_token or getenv("ANTHROPIC_AUTH_TOKEN")
+        if not (self.api_key or self.auth_token):
+            log_error(
+                "ANTHROPIC_API_KEY or ANTHROPIC_AUTH_TOKEN not set. Please set the ANTHROPIC_API_KEY or ANTHROPIC_AUTH_TOKEN environment variable."
             )
 
         # Add API key to client parameters
         client_params["api_key"] = self.api_key
+        client_params["auth_token"] = self.auth_token
         if self.timeout is not None:
             client_params["timeout"] = self.timeout
 
@@ -179,10 +182,6 @@
         """
         # If model is in blacklist, it doesn't support structured outputs
         if self.id in self.NON_STRUCTURED_OUTPUT_MODELS:
-            log_warning(
-                f"Model '{self.id}' does not support structured outputs. "
-                "Structured output features will not be available for this model."
-            )
             return False
 
         # Check for legacy model patterns which don't support structured outputs
@@ -211,8 +210,14 @@
             bool: True if structured outputs are in use
         """
         # Check for output_format usage
-        if response_format is not None and self._supports_structured_outputs():
-            return True
+        if response_format is not None:
+            if self._supports_structured_outputs():
+                return True
+            else:
+                log_warning(
+                    f"Model '{self.id}' does not support structured outputs. "
+                    "Structured output features will not be available for this model."
+                )
 
         # Check for strict tools
         if tools:
@@ -928,8 +933,8 @@
             elif isinstance(response, (ContentBlockStopEvent, ParsedBetaContentBlockStopEvent)):
                 if response.content_block.type == "tool_use":  # type: ignore
                     tool_use = response.content_block  # type: ignore
-                    tool_name = tool_use.name
-                    tool_input = tool_use.input
+                    tool_name = tool_use.name  # type: ignore
+                    tool_input = tool_use.input  # type: ignore
 
                     function_def = {"name": tool_name}
                     if tool_input:
@@ -939,7 +944,7 @@
 
                     model_response.tool_calls = [
                         {
-                            "id": tool_use.id,
+                            "id": tool_use.id,  # type: ignore
                             "type": "function",
                             "function": function_def,
                         }
@@ -960,7 +965,7 @@
             for block in response.message.content:  # type: ignore
                 # Handle text blocks for structured output parsing
                 if block.type == "text":
-                    accumulated_text += block.text
+                    accumulated_text += block.text  # type: ignore
 
                 # Handle citations
                 citations = getattr(block, "citations", None)
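The new auth_token field lets Claude authenticate with a bearer token (ANTHROPIC_AUTH_TOKEN), e.g. through a gateway, instead of an API key; a missing credential is now logged rather than raised. A hedged construction sketch (the model id and token are placeholders):

    from agno.models.anthropic import Claude

    # Credentials are resolved in _get_client_params():
    #   api_key    <- ANTHROPIC_API_KEY    -> client_params["api_key"]
    #   auth_token <- ANTHROPIC_AUTH_TOKEN -> client_params["auth_token"]
    model = Claude(id="claude-sonnet-4-0", auth_token="my-gateway-token")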
agno/models/aws/bedrock.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Tuple, Ty
 
 from pydantic import BaseModel
 
-from agno.exceptions import AgnoError, ModelProviderError
+from agno.exceptions import ModelProviderError
 from agno.models.base import Model
 from agno.models.message import Message
 from agno.models.metrics import Metrics
@@ -102,9 +102,8 @@ class AwsBedrock(Model):
             self.client = AwsClient(service_name="bedrock-runtime", region_name=self.aws_region)
         else:
             if not self.aws_access_key_id or not self.aws_secret_access_key:
-                raise AgnoError(
-                    message="AWS credentials not found. Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables or provide a boto3 session.",
-                    status_code=400,
+                log_error(
+                    "AWS credentials not found. Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables or provide a boto3 session."
                 )
 
             self.client = AwsClient(
agno/models/aws/claude.py CHANGED
@@ -7,7 +7,7 @@ from pydantic import BaseModel
 
 from agno.models.anthropic import Claude as AnthropicClaude
 from agno.utils.http import get_default_async_client, get_default_sync_client
-from agno.utils.log import log_debug, log_warning
+from agno.utils.log import log_debug, log_error, log_warning
 from agno.utils.models.claude import format_tools_for_model
 
 try:
@@ -79,6 +79,10 @@ class Claude(AnthropicClaude):
             "aws_access_key": self.aws_access_key,
             "aws_region": self.aws_region,
         }
+        if not (self.aws_access_key or (self.aws_access_key and self.aws_secret_key)):
+            log_error(
+                "AWS credentials not found. Please either set the AWS_BEDROCK_API_KEY or AWS_ACCESS_KEY and AWS_SECRET_KEY environment variables."
+            )
 
         if self.timeout is not None:
             client_params["timeout"] = self.timeout
agno/models/azure/ai_foundry.py CHANGED
@@ -136,9 +136,9 @@ class AzureAIFoundry(Model):
         self.azure_endpoint = self.azure_endpoint or getenv("AZURE_ENDPOINT")
 
         if not self.api_key:
-            raise ValueError("API key is required")
+            log_error("AZURE_API_KEY not set. Please set the AZURE_API_KEY environment variable.")
         if not self.azure_endpoint:
-            raise ValueError("Endpoint URL is required")
+            log_error("AZURE_ENDPOINT not set. Please set the AZURE_ENDPOINT environment variable.")
 
         base_params = {
             "endpoint": self.azure_endpoint,
agno/models/azure/openai_chat.py CHANGED
@@ -4,6 +4,7 @@ from typing import Any, Dict, Optional
 
 import httpx
 
+from agno.exceptions import ModelAuthenticationError
 from agno.models.openai.like import OpenAILike
 from agno.utils.http import get_default_async_client, get_default_sync_client
 from agno.utils.log import log_warning
@@ -63,6 +64,13 @@ class AzureOpenAI(OpenAILike):
         self.api_key = self.api_key or getenv("AZURE_OPENAI_API_KEY")
         self.azure_endpoint = self.azure_endpoint or getenv("AZURE_OPENAI_ENDPOINT")
         self.azure_deployment = self.azure_deployment or getenv("AZURE_OPENAI_DEPLOYMENT")
+
+        if not self.api_key:
+            raise ModelAuthenticationError(
+                message="AZURE_OPENAI_API_KEY not set. Please set the AZURE_OPENAI_API_KEY environment variable.",
+                model_name=self.name,
+            )
+
         params_mapping = {
             "api_key": self.api_key,
             "api_version": self.api_version,