agno 2.0.4__py3-none-any.whl → 2.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. agno/agent/agent.py +127 -102
  2. agno/db/dynamo/dynamo.py +9 -7
  3. agno/db/firestore/firestore.py +7 -4
  4. agno/db/gcs_json/gcs_json_db.py +6 -4
  5. agno/db/json/json_db.py +10 -6
  6. agno/db/migrations/v1_to_v2.py +191 -23
  7. agno/db/mongo/mongo.py +67 -6
  8. agno/db/mysql/mysql.py +7 -6
  9. agno/db/mysql/schemas.py +27 -27
  10. agno/db/postgres/postgres.py +7 -6
  11. agno/db/redis/redis.py +3 -3
  12. agno/db/singlestore/singlestore.py +4 -4
  13. agno/db/sqlite/sqlite.py +7 -6
  14. agno/db/utils.py +0 -14
  15. agno/integrations/discord/client.py +1 -0
  16. agno/knowledge/embedder/openai.py +19 -11
  17. agno/knowledge/knowledge.py +11 -10
  18. agno/knowledge/reader/reader_factory.py +7 -3
  19. agno/knowledge/reader/web_search_reader.py +12 -6
  20. agno/knowledge/reader/website_reader.py +33 -16
  21. agno/media.py +70 -0
  22. agno/models/aimlapi/aimlapi.py +2 -2
  23. agno/models/base.py +31 -4
  24. agno/models/cerebras/cerebras_openai.py +2 -2
  25. agno/models/deepinfra/deepinfra.py +2 -2
  26. agno/models/deepseek/deepseek.py +2 -2
  27. agno/models/fireworks/fireworks.py +2 -2
  28. agno/models/internlm/internlm.py +2 -2
  29. agno/models/langdb/langdb.py +4 -4
  30. agno/models/litellm/litellm_openai.py +2 -2
  31. agno/models/message.py +135 -0
  32. agno/models/meta/llama_openai.py +2 -2
  33. agno/models/nebius/nebius.py +2 -2
  34. agno/models/nexus/__init__.py +3 -0
  35. agno/models/nexus/nexus.py +25 -0
  36. agno/models/nvidia/nvidia.py +2 -2
  37. agno/models/openai/responses.py +6 -0
  38. agno/models/openrouter/openrouter.py +2 -2
  39. agno/models/perplexity/perplexity.py +2 -2
  40. agno/models/portkey/portkey.py +3 -3
  41. agno/models/response.py +2 -1
  42. agno/models/sambanova/sambanova.py +2 -2
  43. agno/models/together/together.py +2 -2
  44. agno/models/vercel/v0.py +2 -2
  45. agno/models/xai/xai.py +2 -2
  46. agno/os/app.py +162 -42
  47. agno/os/interfaces/agui/utils.py +98 -134
  48. agno/os/router.py +3 -1
  49. agno/os/routers/health.py +0 -1
  50. agno/os/routers/home.py +52 -0
  51. agno/os/routers/knowledge/knowledge.py +2 -2
  52. agno/os/schema.py +21 -0
  53. agno/os/utils.py +1 -9
  54. agno/run/agent.py +19 -3
  55. agno/run/team.py +18 -3
  56. agno/run/workflow.py +10 -0
  57. agno/team/team.py +70 -45
  58. agno/tools/duckduckgo.py +15 -11
  59. agno/tools/e2b.py +14 -7
  60. agno/tools/file_generation.py +350 -0
  61. agno/tools/function.py +2 -0
  62. agno/tools/googlesearch.py +1 -1
  63. agno/utils/gemini.py +24 -4
  64. agno/utils/string.py +32 -0
  65. agno/utils/tools.py +1 -1
  66. agno/vectordb/chroma/chromadb.py +66 -25
  67. agno/vectordb/lancedb/lance_db.py +15 -4
  68. agno/vectordb/milvus/milvus.py +6 -0
  69. agno/workflow/step.py +4 -3
  70. agno/workflow/workflow.py +4 -0
  71. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/METADATA +9 -5
  72. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/RECORD +75 -72
  73. agno/knowledge/reader/url_reader.py +0 -128
  74. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/WHEEL +0 -0
  75. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/licenses/LICENSE +0 -0
  76. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/top_level.txt +0 -0
agno/models/base.py CHANGED
@@ -21,7 +21,7 @@ from uuid import uuid4
21
21
  from pydantic import BaseModel
22
22
 
23
23
  from agno.exceptions import AgentRunException
24
- from agno.media import Audio, Image, Video
24
+ from agno.media import Audio, File, Image, Video
25
25
  from agno.models.message import Citations, Message
26
26
  from agno.models.metrics import Metrics
27
27
  from agno.models.response import ModelResponse, ModelResponseEvent, ToolExecution
@@ -46,6 +46,7 @@ class MessageData:
46
46
  response_audio: Optional[Audio] = None
47
47
  response_image: Optional[Image] = None
48
48
  response_video: Optional[Video] = None
49
+ response_file: Optional[File] = None
49
50
 
50
51
  # Data from the provider that we might need on subsequent messages
51
52
  response_provider_data: Optional[Dict[str, Any]] = None
@@ -266,6 +267,11 @@ class Model(ABC):
266
267
  model_response.videos = []
267
268
  model_response.videos.extend(function_call_response.videos)
268
269
 
270
+ if function_call_response.files is not None:
271
+ if model_response.files is None:
272
+ model_response.files = []
273
+ model_response.files.extend(function_call_response.files)
274
+
269
275
  if (
270
276
  function_call_response.event
271
277
  in [
@@ -293,7 +299,7 @@ class Model(ABC):
293
299
  messages=messages, function_call_results=function_call_results, **model_response.extra or {}
294
300
  )
295
301
 
296
- if any(msg.images or msg.videos or msg.audio for msg in function_call_results):
302
+ if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
297
303
  # Handle function call media
298
304
  self._handle_function_call_media(messages=messages, function_call_results=function_call_results)
299
305
 
@@ -402,6 +408,11 @@ class Model(ABC):
402
408
  model_response.videos = []
403
409
  model_response.videos.extend(function_call_response.videos)
404
410
 
411
+ if function_call_response.files is not None:
412
+ if model_response.files is None:
413
+ model_response.files = []
414
+ model_response.files.extend(function_call_response.files)
415
+
405
416
  if (
406
417
  function_call_response.event
407
418
  in [
@@ -428,7 +439,7 @@ class Model(ABC):
428
439
  messages=messages, function_call_results=function_call_results, **model_response.extra or {}
429
440
  )
430
441
 
431
- if any(msg.images or msg.videos or msg.audio for msg in function_call_results):
442
+ if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
432
443
  # Handle function call media
433
444
  self._handle_function_call_media(messages=messages, function_call_results=function_call_results)
434
445
 
@@ -607,6 +618,10 @@ class Model(ABC):
607
618
  if provider_response.videos:
608
619
  assistant_message.video_output = provider_response.videos[-1] # Taking last (most recent) video
609
620
 
621
+ if provider_response.files is not None:
622
+ if provider_response.files:
623
+ assistant_message.file_output = provider_response.files[-1] # Taking last (most recent) file
624
+
610
625
  if provider_response.audios is not None:
611
626
  if provider_response.audios:
612
627
  assistant_message.audio_output = provider_response.audios[-1] # Taking last (most recent) audio
@@ -1213,6 +1228,8 @@ class Model(ABC):
1213
1228
  function_execution_result.videos = tool_result.videos
1214
1229
  if tool_result.audios:
1215
1230
  function_execution_result.audios = tool_result.audios
1231
+ if tool_result.files:
1232
+ function_execution_result.files = tool_result.files
1216
1233
  else:
1217
1234
  function_call_output = str(function_execution_result.result) if function_execution_result.result else ""
1218
1235
 
@@ -1246,6 +1263,7 @@ class Model(ABC):
1246
1263
  images=function_execution_result.images,
1247
1264
  videos=function_execution_result.videos,
1248
1265
  audios=function_execution_result.audios,
1266
+ files=function_execution_result.files,
1249
1267
  )
1250
1268
 
1251
1269
  # Add function call to function call results
@@ -1617,6 +1635,8 @@ class Model(ABC):
1617
1635
  function_execution_result.videos = tool_result.videos
1618
1636
  if tool_result.audios:
1619
1637
  function_execution_result.audios = tool_result.audios
1638
+ if tool_result.files:
1639
+ function_execution_result.files = tool_result.files
1620
1640
  else:
1621
1641
  function_call_output = str(function_call.result)
1622
1642
 
@@ -1649,6 +1669,7 @@ class Model(ABC):
1649
1669
  images=function_execution_result.images,
1650
1670
  videos=function_execution_result.videos,
1651
1671
  audios=function_execution_result.audios,
1672
+ files=function_execution_result.files,
1652
1673
  )
1653
1674
 
1654
1675
  # Add function call result to function call results
@@ -1698,6 +1719,7 @@ class Model(ABC):
1698
1719
  all_images: List[Image] = []
1699
1720
  all_videos: List[Video] = []
1700
1721
  all_audio: List[Audio] = []
1722
+ all_files: List[File] = []
1701
1723
 
1702
1724
  for result_message in function_call_results:
1703
1725
  if result_message.images:
@@ -1713,15 +1735,20 @@ class Model(ABC):
1713
1735
  all_audio.extend(result_message.audio)
1714
1736
  result_message.audio = None
1715
1737
 
1738
+ if result_message.files:
1739
+ all_files.extend(result_message.files)
1740
+ result_message.files = None
1741
+
1716
1742
  # If we have media artifacts, add a follow-up "user" message instead of a "tool"
1717
1743
  # message with the media artifacts which throws error for some models
1718
- if all_images or all_videos or all_audio:
1744
+ if all_images or all_videos or all_audio or all_files:
1719
1745
  media_message = Message(
1720
1746
  role="user",
1721
1747
  content="Take note of the following content",
1722
1748
  images=all_images if all_images else None,
1723
1749
  videos=all_videos if all_videos else None,
1724
1750
  audio=all_audio if all_audio else None,
1751
+ files=all_files if all_files else None,
1725
1752
  )
1726
1753
  messages.append(media_message)
1727
1754
 
@@ -1,5 +1,5 @@
1
1
  import json
2
- from dataclasses import dataclass
2
+ from dataclasses import dataclass, field
3
3
  from os import getenv
4
4
  from typing import Any, Dict, List, Optional, Type, Union
5
5
 
@@ -18,7 +18,7 @@ class CerebrasOpenAI(OpenAILike):
18
18
 
19
19
  parallel_tool_calls: Optional[bool] = None
20
20
  base_url: str = "https://api.cerebras.ai/v1"
21
- api_key: Optional[str] = getenv("CEREBRAS_API_KEY", None)
21
+ api_key: Optional[str] = field(default_factory=lambda: getenv("CEREBRAS_API_KEY", None))
22
22
 
23
23
  def get_request_params(
24
24
  self,
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -22,7 +22,7 @@ class DeepInfra(OpenAILike):
22
22
  name: str = "DeepInfra"
23
23
  provider: str = "DeepInfra"
24
24
 
25
- api_key: Optional[str] = getenv("DEEPINFRA_API_KEY")
25
+ api_key: Optional[str] = field(default_factory=lambda: getenv("DEEPINFRA_API_KEY"))
26
26
  base_url: str = "https://api.deepinfra.com/v1/openai"
27
27
 
28
28
  supports_native_structured_outputs: bool = False
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Any, Dict, Optional
4
4
 
@@ -23,7 +23,7 @@ class DeepSeek(OpenAILike):
23
23
  name: str = "DeepSeek"
24
24
  provider: str = "DeepSeek"
25
25
 
26
- api_key: Optional[str] = getenv("DEEPSEEK_API_KEY")
26
+ api_key: Optional[str] = field(default_factory=lambda: getenv("DEEPSEEK_API_KEY"))
27
27
  base_url: str = "https://api.deepseek.com"
28
28
 
29
29
  # Their support for structured outputs is currently broken
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -22,5 +22,5 @@ class Fireworks(OpenAILike):
22
22
  name: str = "Fireworks"
23
23
  provider: str = "Fireworks"
24
24
 
25
- api_key: Optional[str] = getenv("FIREWORKS_API_KEY")
25
+ api_key: Optional[str] = field(default_factory=lambda: getenv("FIREWORKS_API_KEY"))
26
26
  base_url: str = "https://api.fireworks.ai/inference/v1"
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -22,5 +22,5 @@ class InternLM(OpenAILike):
22
22
  name: str = "InternLM"
23
23
  provider: str = "InternLM"
24
24
 
25
- api_key: Optional[str] = getenv("INTERNLM_API_KEY")
25
+ api_key: Optional[str] = field(default_factory=lambda: getenv("INTERNLM_API_KEY"))
26
26
  base_url: Optional[str] = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/chat/completions"
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Any, Dict, Optional
4
4
 
@@ -22,10 +22,10 @@ class LangDB(OpenAILike):
22
22
  name: str = "LangDB"
23
23
  provider: str = "LangDB"
24
24
 
25
- api_key: Optional[str] = getenv("LANGDB_API_KEY")
26
- project_id: Optional[str] = getenv("LANGDB_PROJECT_ID")
25
+ api_key: Optional[str] = field(default_factory=lambda: getenv("LANGDB_API_KEY"))
26
+ project_id: Optional[str] = field(default_factory=lambda: getenv("LANGDB_PROJECT_ID"))
27
27
 
28
- base_host_url: str = getenv("LANGDB_API_BASE_URL", "https://api.us-east-1.langdb.ai")
28
+ base_host_url: str = field(default_factory=lambda: getenv("LANGDB_API_BASE_URL", "https://api.us-east-1.langdb.ai"))
29
29
 
30
30
  base_url: Optional[str] = None
31
31
  label: Optional[str] = None
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -21,5 +21,5 @@ class LiteLLMOpenAI(OpenAILike):
21
21
  name: str = "LiteLLM"
22
22
  provider: str = "LiteLLM"
23
23
 
24
- api_key: Optional[str] = getenv("LITELLM_API_KEY")
24
+ api_key: Optional[str] = field(default_factory=lambda: getenv("LITELLM_API_KEY"))
25
25
  base_url: str = "http://0.0.0.0:4000"
agno/models/message.py CHANGED
@@ -74,6 +74,7 @@ class Message(BaseModel):
74
74
  audio_output: Optional[Audio] = None
75
75
  image_output: Optional[Image] = None
76
76
  video_output: Optional[Video] = None
77
+ file_output: Optional[File] = None
77
78
 
78
79
  # The thinking content from the model
79
80
  redacted_reasoning_content: Optional[str] = None
@@ -121,6 +122,138 @@ class Message(BaseModel):
121
122
 
122
123
  @classmethod
123
124
  def from_dict(cls, data: Dict[str, Any]) -> "Message":
125
+ # Handle image reconstruction properly
126
+ if "images" in data and data["images"]:
127
+ reconstructed_images = []
128
+ for i, img_data in enumerate(data["images"]):
129
+ if isinstance(img_data, dict):
130
+ # If content is base64, decode it back to bytes
131
+ if "content" in img_data and isinstance(img_data["content"], str):
132
+ reconstructed_images.append(
133
+ Image.from_base64(
134
+ img_data["content"],
135
+ id=img_data.get("id"),
136
+ mime_type=img_data.get("mime_type"),
137
+ format=img_data.get("format"),
138
+ )
139
+ )
140
+ else:
141
+ # Regular image (filepath/url)
142
+ reconstructed_images.append(Image(**img_data))
143
+ else:
144
+ reconstructed_images.append(img_data)
145
+ data["images"] = reconstructed_images
146
+
147
+ # Handle audio reconstruction properly
148
+ if "audio" in data and data["audio"]:
149
+ reconstructed_audio = []
150
+ for i, aud_data in enumerate(data["audio"]):
151
+ if isinstance(aud_data, dict):
152
+ # If content is base64, decode it back to bytes
153
+ if "content" in aud_data and isinstance(aud_data["content"], str):
154
+ reconstructed_audio.append(
155
+ Audio.from_base64(
156
+ aud_data["content"],
157
+ id=aud_data.get("id"),
158
+ mime_type=aud_data.get("mime_type"),
159
+ transcript=aud_data.get("transcript"),
160
+ expires_at=aud_data.get("expires_at"),
161
+ sample_rate=aud_data.get("sample_rate", 24000),
162
+ channels=aud_data.get("channels", 1),
163
+ )
164
+ )
165
+ else:
166
+ reconstructed_audio.append(Audio(**aud_data))
167
+ else:
168
+ reconstructed_audio.append(aud_data)
169
+ data["audio"] = reconstructed_audio
170
+
171
+ # Handle video reconstruction properly
172
+ if "videos" in data and data["videos"]:
173
+ reconstructed_videos = []
174
+ for i, vid_data in enumerate(data["videos"]):
175
+ if isinstance(vid_data, dict):
176
+ # If content is base64, decode it back to bytes
177
+ if "content" in vid_data and isinstance(vid_data["content"], str):
178
+ reconstructed_videos.append(
179
+ Video.from_base64(
180
+ vid_data["content"],
181
+ id=vid_data.get("id"),
182
+ mime_type=vid_data.get("mime_type"),
183
+ format=vid_data.get("format"),
184
+ )
185
+ )
186
+ else:
187
+ reconstructed_videos.append(Video(**vid_data))
188
+ else:
189
+ reconstructed_videos.append(vid_data)
190
+ data["videos"] = reconstructed_videos
191
+
192
+ # Handle file reconstruction properly
193
+ if "files" in data and data["files"]:
194
+ reconstructed_files = []
195
+ for i, file_data in enumerate(data["files"]):
196
+ if isinstance(file_data, dict):
197
+ # If content is base64, decode it back to bytes
198
+ if "content" in file_data and isinstance(file_data["content"], str):
199
+ reconstructed_files.append(
200
+ File.from_base64(
201
+ file_data["content"],
202
+ id=file_data.get("id"),
203
+ mime_type=file_data.get("mime_type"),
204
+ filename=file_data.get("filename"),
205
+ name=file_data.get("name"),
206
+ format=file_data.get("format"),
207
+ )
208
+ )
209
+ else:
210
+ reconstructed_files.append(File(**file_data))
211
+ else:
212
+ reconstructed_files.append(file_data)
213
+ data["files"] = reconstructed_files
214
+
215
+ if "audio_output" in data and data["audio_output"]:
216
+ aud_data = data["audio_output"]
217
+ if isinstance(aud_data, dict):
218
+ if "content" in aud_data and isinstance(aud_data["content"], str):
219
+ data["audio_output"] = Audio.from_base64(
220
+ aud_data["content"],
221
+ id=aud_data.get("id"),
222
+ mime_type=aud_data.get("mime_type"),
223
+ transcript=aud_data.get("transcript"),
224
+ expires_at=aud_data.get("expires_at"),
225
+ sample_rate=aud_data.get("sample_rate", 24000),
226
+ channels=aud_data.get("channels", 1),
227
+ )
228
+ else:
229
+ data["audio_output"] = Audio(**aud_data)
230
+
231
+ if "image_output" in data and data["image_output"]:
232
+ img_data = data["image_output"]
233
+ if isinstance(img_data, dict):
234
+ if "content" in img_data and isinstance(img_data["content"], str):
235
+ data["image_output"] = Image.from_base64(
236
+ img_data["content"],
237
+ id=img_data.get("id"),
238
+ mime_type=img_data.get("mime_type"),
239
+ format=img_data.get("format"),
240
+ )
241
+ else:
242
+ data["image_output"] = Image(**img_data)
243
+
244
+ if "video_output" in data and data["video_output"]:
245
+ vid_data = data["video_output"]
246
+ if isinstance(vid_data, dict):
247
+ if "content" in vid_data and isinstance(vid_data["content"], str):
248
+ data["video_output"] = Video.from_base64(
249
+ vid_data["content"],
250
+ id=vid_data.get("id"),
251
+ mime_type=vid_data.get("mime_type"),
252
+ format=vid_data.get("format"),
253
+ )
254
+ else:
255
+ data["video_output"] = Video(**vid_data)
256
+
124
257
  return cls(**data)
125
258
 
126
259
  def to_dict(self) -> Dict[str, Any]:
@@ -152,6 +285,8 @@ class Message(BaseModel):
152
285
  message_dict["audio"] = [aud.to_dict() for aud in self.audio]
153
286
  if self.videos:
154
287
  message_dict["videos"] = [vid.to_dict() for vid in self.videos]
288
+ if self.files:
289
+ message_dict["files"] = [file.to_dict() for file in self.files]
155
290
  if self.audio_output:
156
291
  message_dict["audio_output"] = self.audio_output.to_dict()
157
292
 
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Any, Dict, Optional
4
4
 
@@ -31,7 +31,7 @@ class LlamaOpenAI(OpenAILike):
31
31
  name: str = "LlamaOpenAI"
32
32
  provider: str = "LlamaOpenAI"
33
33
 
34
- api_key: Optional[str] = getenv("LLAMA_API_KEY")
34
+ api_key: Optional[str] = field(default_factory=lambda: getenv("LLAMA_API_KEY"))
35
35
  base_url: Optional[str] = "https://api.llama.com/compat/v1/"
36
36
 
37
37
  # Request parameters
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Any, Dict, Optional
4
4
 
@@ -23,7 +23,7 @@ class Nebius(OpenAILike):
23
23
  name: str = "Nebius"
24
24
  provider: str = "Nebius"
25
25
 
26
- api_key: Optional[str] = getenv("NEBIUS_API_KEY")
26
+ api_key: Optional[str] = field(default_factory=lambda: getenv("NEBIUS_API_KEY"))
27
27
  base_url: str = "https://api.studio.nebius.com/v1/"
28
28
 
29
29
  def _get_client_params(self) -> Dict[str, Any]:
@@ -0,0 +1,3 @@
1
+ from agno.models.nexus.nexus import Nexus
2
+
3
+ __all__ = ["Nexus"]
@@ -0,0 +1,25 @@
1
+ from dataclasses import dataclass
2
+
3
+ from agno.models.openai.like import OpenAILike
4
+
5
+
6
+ @dataclass
7
+ class Nexus(OpenAILike):
8
+ """
9
+ A class for interacting with Nvidia models.
10
+
11
+ Attributes:
12
+ id (str): The id of the Nexus model to use. Default is "nvidia/llama-3.1-nemotron-70b-instruct".
13
+ name (str): The name of this chat model instance. Default is "Nexus"
14
+ provider (str): The provider of the model. Default is "Nexus".
15
+ api_key (str): The api key to authorize request to Nexus.
16
+ base_url (str): The base url to which the requests are sent.
17
+ """
18
+
19
+ id: str = "openai/gpt-4"
20
+ name: str = "Nexus"
21
+ provider: str = "Nexus"
22
+
23
+ base_url: str = "http://localhost:8000/llm/v1/"
24
+
25
+ supports_native_structured_outputs: bool = False
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -22,7 +22,7 @@ class Nvidia(OpenAILike):
22
22
  name: str = "Nvidia"
23
23
  provider: str = "Nvidia"
24
24
 
25
- api_key: Optional[str] = getenv("NVIDIA_API_KEY")
25
+ api_key: Optional[str] = field(default_factory=lambda: getenv("NVIDIA_API_KEY"))
26
26
  base_url: str = "https://integrate.api.nvidia.com/v1"
27
27
 
28
28
  supports_native_structured_outputs: bool = False
@@ -1088,4 +1088,10 @@ class OpenAIResponses(Model):
1088
1088
  metrics.output_tokens = response_usage.output_tokens or 0
1089
1089
  metrics.total_tokens = response_usage.total_tokens or 0
1090
1090
 
1091
+ if input_tokens_details := response_usage.input_tokens_details:
1092
+ metrics.cache_read_tokens = input_tokens_details.cached_tokens
1093
+
1094
+ if output_tokens_details := response_usage.output_tokens_details:
1095
+ metrics.reasoning_tokens = output_tokens_details.reasoning_tokens
1096
+
1091
1097
  return metrics
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -23,6 +23,6 @@ class OpenRouter(OpenAILike):
23
23
  name: str = "OpenRouter"
24
24
  provider: str = "OpenRouter"
25
25
 
26
- api_key: Optional[str] = getenv("OPENROUTER_API_KEY")
26
+ api_key: Optional[str] = field(default_factory=lambda: getenv("OPENROUTER_API_KEY"))
27
27
  base_url: str = "https://openrouter.ai/api/v1"
28
28
  max_tokens: int = 1024
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Any, Dict, List, Optional, Type, Union
4
4
 
@@ -42,7 +42,7 @@ class Perplexity(OpenAILike):
42
42
  name: str = "Perplexity"
43
43
  provider: str = "Perplexity"
44
44
 
45
- api_key: Optional[str] = getenv("PERPLEXITY_API_KEY")
45
+ api_key: Optional[str] = field(default_factory=lambda: getenv("PERPLEXITY_API_KEY"))
46
46
  base_url: str = "https://api.perplexity.ai/"
47
47
  max_tokens: int = 1024
48
48
  top_k: Optional[float] = None
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Any, Dict, Optional, cast
4
4
 
@@ -30,8 +30,8 @@ class Portkey(OpenAILike):
30
30
  name: str = "Portkey"
31
31
  provider: str = "Portkey"
32
32
 
33
- portkey_api_key: Optional[str] = getenv("PORTKEY_API_KEY")
34
- virtual_key: Optional[str] = getenv("PORTKEY_VIRTUAL_KEY")
33
+ portkey_api_key: Optional[str] = field(default_factory=lambda: getenv("PORTKEY_API_KEY"))
34
+ virtual_key: Optional[str] = field(default_factory=lambda: getenv("PORTKEY_VIRTUAL_KEY"))
35
35
  config: Optional[Dict[str, Any]] = None
36
36
  base_url: str = PORTKEY_GATEWAY_URL
37
37
 
agno/models/response.py CHANGED
@@ -3,7 +3,7 @@ from enum import Enum
3
3
  from time import time
4
4
  from typing import Any, Dict, List, Optional
5
5
 
6
- from agno.media import Audio, Image, Video
6
+ from agno.media import Audio, File, Image, Video
7
7
  from agno.models.message import Citations
8
8
  from agno.models.metrics import Metrics
9
9
  from agno.tools.function import UserInputField
@@ -98,6 +98,7 @@ class ModelResponse:
98
98
  images: Optional[List[Image]] = None
99
99
  videos: Optional[List[Video]] = None
100
100
  audios: Optional[List[Audio]] = None
101
+ files: Optional[List[File]] = None
101
102
 
102
103
  # Model tool calls
103
104
  tool_calls: List[Dict[str, Any]] = field(default_factory=list)
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -22,7 +22,7 @@ class Sambanova(OpenAILike):
22
22
  name: str = "Sambanova"
23
23
  provider: str = "Sambanova"
24
24
 
25
- api_key: Optional[str] = getenv("SAMBANOVA_API_KEY")
25
+ api_key: Optional[str] = field(default_factory=lambda: getenv("SAMBANOVA_API_KEY"))
26
26
  base_url: str = "https://api.sambanova.ai/v1"
27
27
 
28
28
  supports_native_structured_outputs: bool = False
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -21,5 +21,5 @@ class Together(OpenAILike):
21
21
  id: str = "mistralai/Mixtral-8x7B-Instruct-v0.1"
22
22
  name: str = "Together"
23
23
  provider: str = "Together"
24
- api_key: Optional[str] = getenv("TOGETHER_API_KEY")
24
+ api_key: Optional[str] = field(default_factory=lambda: getenv("TOGETHER_API_KEY"))
25
25
  base_url: str = "https://api.together.xyz/v1"
agno/models/vercel/v0.py CHANGED
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Optional
4
4
 
@@ -22,5 +22,5 @@ class V0(OpenAILike):
22
22
  name: str = "v0"
23
23
  provider: str = "Vercel"
24
24
 
25
- api_key: Optional[str] = getenv("V0_API_KEY")
25
+ api_key: Optional[str] = field(default_factory=lambda: getenv("V0_API_KEY"))
26
26
  base_url: str = "https://api.v0.dev/v1/"
agno/models/xai/xai.py CHANGED
@@ -1,4 +1,4 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
2
  from os import getenv
3
3
  from typing import Any, Dict, List, Optional, Type, Union
4
4
 
@@ -34,7 +34,7 @@ class xAI(OpenAILike):
34
34
  name: str = "xAI"
35
35
  provider: str = "xAI"
36
36
 
37
- api_key: Optional[str] = getenv("XAI_API_KEY")
37
+ api_key: Optional[str] = field(default_factory=lambda: getenv("XAI_API_KEY"))
38
38
  base_url: str = "https://api.x.ai/v1"
39
39
 
40
40
  search_parameters: Optional[Dict[str, Any]] = None