agno 2.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (314)
  1. agno/agent/agent.py +5540 -2273
  2. agno/api/api.py +2 -0
  3. agno/api/os.py +1 -1
  4. agno/compression/__init__.py +3 -0
  5. agno/compression/manager.py +247 -0
  6. agno/culture/__init__.py +3 -0
  7. agno/culture/manager.py +956 -0
  8. agno/db/async_postgres/__init__.py +3 -0
  9. agno/db/base.py +689 -6
  10. agno/db/dynamo/dynamo.py +933 -37
  11. agno/db/dynamo/schemas.py +174 -10
  12. agno/db/dynamo/utils.py +63 -4
  13. agno/db/firestore/firestore.py +831 -9
  14. agno/db/firestore/schemas.py +51 -0
  15. agno/db/firestore/utils.py +102 -4
  16. agno/db/gcs_json/gcs_json_db.py +660 -12
  17. agno/db/gcs_json/utils.py +60 -26
  18. agno/db/in_memory/in_memory_db.py +287 -14
  19. agno/db/in_memory/utils.py +60 -2
  20. agno/db/json/json_db.py +590 -14
  21. agno/db/json/utils.py +60 -26
  22. agno/db/migrations/manager.py +199 -0
  23. agno/db/migrations/v1_to_v2.py +43 -13
  24. agno/db/migrations/versions/__init__.py +0 -0
  25. agno/db/migrations/versions/v2_3_0.py +938 -0
  26. agno/db/mongo/__init__.py +15 -1
  27. agno/db/mongo/async_mongo.py +2760 -0
  28. agno/db/mongo/mongo.py +879 -11
  29. agno/db/mongo/schemas.py +42 -0
  30. agno/db/mongo/utils.py +80 -8
  31. agno/db/mysql/__init__.py +2 -1
  32. agno/db/mysql/async_mysql.py +2912 -0
  33. agno/db/mysql/mysql.py +946 -68
  34. agno/db/mysql/schemas.py +72 -10
  35. agno/db/mysql/utils.py +198 -7
  36. agno/db/postgres/__init__.py +2 -1
  37. agno/db/postgres/async_postgres.py +2579 -0
  38. agno/db/postgres/postgres.py +942 -57
  39. agno/db/postgres/schemas.py +81 -18
  40. agno/db/postgres/utils.py +164 -2
  41. agno/db/redis/redis.py +671 -7
  42. agno/db/redis/schemas.py +50 -0
  43. agno/db/redis/utils.py +65 -7
  44. agno/db/schemas/__init__.py +2 -1
  45. agno/db/schemas/culture.py +120 -0
  46. agno/db/schemas/evals.py +1 -0
  47. agno/db/schemas/memory.py +17 -2
  48. agno/db/singlestore/schemas.py +63 -0
  49. agno/db/singlestore/singlestore.py +949 -83
  50. agno/db/singlestore/utils.py +60 -2
  51. agno/db/sqlite/__init__.py +2 -1
  52. agno/db/sqlite/async_sqlite.py +2911 -0
  53. agno/db/sqlite/schemas.py +62 -0
  54. agno/db/sqlite/sqlite.py +965 -46
  55. agno/db/sqlite/utils.py +169 -8
  56. agno/db/surrealdb/__init__.py +3 -0
  57. agno/db/surrealdb/metrics.py +292 -0
  58. agno/db/surrealdb/models.py +334 -0
  59. agno/db/surrealdb/queries.py +71 -0
  60. agno/db/surrealdb/surrealdb.py +1908 -0
  61. agno/db/surrealdb/utils.py +147 -0
  62. agno/db/utils.py +2 -0
  63. agno/eval/__init__.py +10 -0
  64. agno/eval/accuracy.py +75 -55
  65. agno/eval/agent_as_judge.py +861 -0
  66. agno/eval/base.py +29 -0
  67. agno/eval/performance.py +16 -7
  68. agno/eval/reliability.py +28 -16
  69. agno/eval/utils.py +35 -17
  70. agno/exceptions.py +27 -2
  71. agno/filters.py +354 -0
  72. agno/guardrails/prompt_injection.py +1 -0
  73. agno/hooks/__init__.py +3 -0
  74. agno/hooks/decorator.py +164 -0
  75. agno/integrations/discord/client.py +1 -1
  76. agno/knowledge/chunking/agentic.py +13 -10
  77. agno/knowledge/chunking/fixed.py +4 -1
  78. agno/knowledge/chunking/semantic.py +9 -4
  79. agno/knowledge/chunking/strategy.py +59 -15
  80. agno/knowledge/embedder/fastembed.py +1 -1
  81. agno/knowledge/embedder/nebius.py +1 -1
  82. agno/knowledge/embedder/ollama.py +8 -0
  83. agno/knowledge/embedder/openai.py +8 -8
  84. agno/knowledge/embedder/sentence_transformer.py +6 -2
  85. agno/knowledge/embedder/vllm.py +262 -0
  86. agno/knowledge/knowledge.py +1618 -318
  87. agno/knowledge/reader/base.py +6 -2
  88. agno/knowledge/reader/csv_reader.py +8 -10
  89. agno/knowledge/reader/docx_reader.py +5 -6
  90. agno/knowledge/reader/field_labeled_csv_reader.py +16 -20
  91. agno/knowledge/reader/json_reader.py +5 -4
  92. agno/knowledge/reader/markdown_reader.py +8 -8
  93. agno/knowledge/reader/pdf_reader.py +17 -19
  94. agno/knowledge/reader/pptx_reader.py +101 -0
  95. agno/knowledge/reader/reader_factory.py +32 -3
  96. agno/knowledge/reader/s3_reader.py +3 -3
  97. agno/knowledge/reader/tavily_reader.py +193 -0
  98. agno/knowledge/reader/text_reader.py +22 -10
  99. agno/knowledge/reader/web_search_reader.py +1 -48
  100. agno/knowledge/reader/website_reader.py +10 -10
  101. agno/knowledge/reader/wikipedia_reader.py +33 -1
  102. agno/knowledge/types.py +1 -0
  103. agno/knowledge/utils.py +72 -7
  104. agno/media.py +22 -6
  105. agno/memory/__init__.py +14 -1
  106. agno/memory/manager.py +544 -83
  107. agno/memory/strategies/__init__.py +15 -0
  108. agno/memory/strategies/base.py +66 -0
  109. agno/memory/strategies/summarize.py +196 -0
  110. agno/memory/strategies/types.py +37 -0
  111. agno/models/aimlapi/aimlapi.py +17 -0
  112. agno/models/anthropic/claude.py +515 -40
  113. agno/models/aws/bedrock.py +102 -21
  114. agno/models/aws/claude.py +131 -274
  115. agno/models/azure/ai_foundry.py +41 -19
  116. agno/models/azure/openai_chat.py +39 -8
  117. agno/models/base.py +1249 -525
  118. agno/models/cerebras/cerebras.py +91 -21
  119. agno/models/cerebras/cerebras_openai.py +21 -2
  120. agno/models/cohere/chat.py +40 -6
  121. agno/models/cometapi/cometapi.py +18 -1
  122. agno/models/dashscope/dashscope.py +2 -3
  123. agno/models/deepinfra/deepinfra.py +18 -1
  124. agno/models/deepseek/deepseek.py +69 -3
  125. agno/models/fireworks/fireworks.py +18 -1
  126. agno/models/google/gemini.py +877 -80
  127. agno/models/google/utils.py +22 -0
  128. agno/models/groq/groq.py +51 -18
  129. agno/models/huggingface/huggingface.py +17 -6
  130. agno/models/ibm/watsonx.py +16 -6
  131. agno/models/internlm/internlm.py +18 -1
  132. agno/models/langdb/langdb.py +13 -1
  133. agno/models/litellm/chat.py +44 -9
  134. agno/models/litellm/litellm_openai.py +18 -1
  135. agno/models/message.py +28 -5
  136. agno/models/meta/llama.py +47 -14
  137. agno/models/meta/llama_openai.py +22 -17
  138. agno/models/mistral/mistral.py +8 -4
  139. agno/models/nebius/nebius.py +6 -7
  140. agno/models/nvidia/nvidia.py +20 -3
  141. agno/models/ollama/chat.py +24 -8
  142. agno/models/openai/chat.py +104 -29
  143. agno/models/openai/responses.py +101 -81
  144. agno/models/openrouter/openrouter.py +60 -3
  145. agno/models/perplexity/perplexity.py +17 -1
  146. agno/models/portkey/portkey.py +7 -6
  147. agno/models/requesty/requesty.py +24 -4
  148. agno/models/response.py +73 -2
  149. agno/models/sambanova/sambanova.py +20 -3
  150. agno/models/siliconflow/siliconflow.py +19 -2
  151. agno/models/together/together.py +20 -3
  152. agno/models/utils.py +254 -8
  153. agno/models/vercel/v0.py +20 -3
  154. agno/models/vertexai/__init__.py +0 -0
  155. agno/models/vertexai/claude.py +190 -0
  156. agno/models/vllm/vllm.py +19 -14
  157. agno/models/xai/xai.py +19 -2
  158. agno/os/app.py +549 -152
  159. agno/os/auth.py +190 -3
  160. agno/os/config.py +23 -0
  161. agno/os/interfaces/a2a/router.py +8 -11
  162. agno/os/interfaces/a2a/utils.py +1 -1
  163. agno/os/interfaces/agui/router.py +18 -3
  164. agno/os/interfaces/agui/utils.py +152 -39
  165. agno/os/interfaces/slack/router.py +55 -37
  166. agno/os/interfaces/slack/slack.py +9 -1
  167. agno/os/interfaces/whatsapp/router.py +0 -1
  168. agno/os/interfaces/whatsapp/security.py +3 -1
  169. agno/os/mcp.py +110 -52
  170. agno/os/middleware/__init__.py +2 -0
  171. agno/os/middleware/jwt.py +676 -112
  172. agno/os/router.py +40 -1478
  173. agno/os/routers/agents/__init__.py +3 -0
  174. agno/os/routers/agents/router.py +599 -0
  175. agno/os/routers/agents/schema.py +261 -0
  176. agno/os/routers/evals/evals.py +96 -39
  177. agno/os/routers/evals/schemas.py +65 -33
  178. agno/os/routers/evals/utils.py +80 -10
  179. agno/os/routers/health.py +10 -4
  180. agno/os/routers/knowledge/knowledge.py +196 -38
  181. agno/os/routers/knowledge/schemas.py +82 -22
  182. agno/os/routers/memory/memory.py +279 -52
  183. agno/os/routers/memory/schemas.py +46 -17
  184. agno/os/routers/metrics/metrics.py +20 -8
  185. agno/os/routers/metrics/schemas.py +16 -16
  186. agno/os/routers/session/session.py +462 -34
  187. agno/os/routers/teams/__init__.py +3 -0
  188. agno/os/routers/teams/router.py +512 -0
  189. agno/os/routers/teams/schema.py +257 -0
  190. agno/os/routers/traces/__init__.py +3 -0
  191. agno/os/routers/traces/schemas.py +414 -0
  192. agno/os/routers/traces/traces.py +499 -0
  193. agno/os/routers/workflows/__init__.py +3 -0
  194. agno/os/routers/workflows/router.py +624 -0
  195. agno/os/routers/workflows/schema.py +75 -0
  196. agno/os/schema.py +256 -693
  197. agno/os/scopes.py +469 -0
  198. agno/os/utils.py +514 -36
  199. agno/reasoning/anthropic.py +80 -0
  200. agno/reasoning/gemini.py +73 -0
  201. agno/reasoning/openai.py +5 -0
  202. agno/reasoning/vertexai.py +76 -0
  203. agno/run/__init__.py +6 -0
  204. agno/run/agent.py +155 -32
  205. agno/run/base.py +55 -3
  206. agno/run/requirement.py +181 -0
  207. agno/run/team.py +125 -38
  208. agno/run/workflow.py +72 -18
  209. agno/session/agent.py +102 -89
  210. agno/session/summary.py +56 -15
  211. agno/session/team.py +164 -90
  212. agno/session/workflow.py +405 -40
  213. agno/table.py +10 -0
  214. agno/team/team.py +3974 -1903
  215. agno/tools/dalle.py +2 -4
  216. agno/tools/eleven_labs.py +23 -25
  217. agno/tools/exa.py +21 -16
  218. agno/tools/file.py +153 -23
  219. agno/tools/file_generation.py +16 -10
  220. agno/tools/firecrawl.py +15 -7
  221. agno/tools/function.py +193 -38
  222. agno/tools/gmail.py +238 -14
  223. agno/tools/google_drive.py +271 -0
  224. agno/tools/googlecalendar.py +36 -8
  225. agno/tools/googlesheets.py +20 -5
  226. agno/tools/jira.py +20 -0
  227. agno/tools/mcp/__init__.py +10 -0
  228. agno/tools/mcp/mcp.py +331 -0
  229. agno/tools/mcp/multi_mcp.py +347 -0
  230. agno/tools/mcp/params.py +24 -0
  231. agno/tools/mcp_toolbox.py +3 -3
  232. agno/tools/models/nebius.py +5 -5
  233. agno/tools/models_labs.py +20 -10
  234. agno/tools/nano_banana.py +151 -0
  235. agno/tools/notion.py +204 -0
  236. agno/tools/parallel.py +314 -0
  237. agno/tools/postgres.py +76 -36
  238. agno/tools/redshift.py +406 -0
  239. agno/tools/scrapegraph.py +1 -1
  240. agno/tools/shopify.py +1519 -0
  241. agno/tools/slack.py +18 -3
  242. agno/tools/spotify.py +919 -0
  243. agno/tools/tavily.py +146 -0
  244. agno/tools/toolkit.py +25 -0
  245. agno/tools/workflow.py +8 -1
  246. agno/tools/yfinance.py +12 -11
  247. agno/tracing/__init__.py +12 -0
  248. agno/tracing/exporter.py +157 -0
  249. agno/tracing/schemas.py +276 -0
  250. agno/tracing/setup.py +111 -0
  251. agno/utils/agent.py +938 -0
  252. agno/utils/cryptography.py +22 -0
  253. agno/utils/dttm.py +33 -0
  254. agno/utils/events.py +151 -3
  255. agno/utils/gemini.py +15 -5
  256. agno/utils/hooks.py +118 -4
  257. agno/utils/http.py +113 -2
  258. agno/utils/knowledge.py +12 -5
  259. agno/utils/log.py +1 -0
  260. agno/utils/mcp.py +92 -2
  261. agno/utils/media.py +187 -1
  262. agno/utils/merge_dict.py +3 -3
  263. agno/utils/message.py +60 -0
  264. agno/utils/models/ai_foundry.py +9 -2
  265. agno/utils/models/claude.py +49 -14
  266. agno/utils/models/cohere.py +9 -2
  267. agno/utils/models/llama.py +9 -2
  268. agno/utils/models/mistral.py +4 -2
  269. agno/utils/print_response/agent.py +109 -16
  270. agno/utils/print_response/team.py +223 -30
  271. agno/utils/print_response/workflow.py +251 -34
  272. agno/utils/streamlit.py +1 -1
  273. agno/utils/team.py +98 -9
  274. agno/utils/tokens.py +657 -0
  275. agno/vectordb/base.py +39 -7
  276. agno/vectordb/cassandra/cassandra.py +21 -5
  277. agno/vectordb/chroma/chromadb.py +43 -12
  278. agno/vectordb/clickhouse/clickhousedb.py +21 -5
  279. agno/vectordb/couchbase/couchbase.py +29 -5
  280. agno/vectordb/lancedb/lance_db.py +92 -181
  281. agno/vectordb/langchaindb/langchaindb.py +24 -4
  282. agno/vectordb/lightrag/lightrag.py +17 -3
  283. agno/vectordb/llamaindex/llamaindexdb.py +25 -5
  284. agno/vectordb/milvus/milvus.py +50 -37
  285. agno/vectordb/mongodb/__init__.py +7 -1
  286. agno/vectordb/mongodb/mongodb.py +36 -30
  287. agno/vectordb/pgvector/pgvector.py +201 -77
  288. agno/vectordb/pineconedb/pineconedb.py +41 -23
  289. agno/vectordb/qdrant/qdrant.py +67 -54
  290. agno/vectordb/redis/__init__.py +9 -0
  291. agno/vectordb/redis/redisdb.py +682 -0
  292. agno/vectordb/singlestore/singlestore.py +50 -29
  293. agno/vectordb/surrealdb/surrealdb.py +31 -41
  294. agno/vectordb/upstashdb/upstashdb.py +34 -6
  295. agno/vectordb/weaviate/weaviate.py +53 -14
  296. agno/workflow/__init__.py +2 -0
  297. agno/workflow/agent.py +299 -0
  298. agno/workflow/condition.py +120 -18
  299. agno/workflow/loop.py +77 -10
  300. agno/workflow/parallel.py +231 -143
  301. agno/workflow/router.py +118 -17
  302. agno/workflow/step.py +609 -170
  303. agno/workflow/steps.py +73 -6
  304. agno/workflow/types.py +96 -21
  305. agno/workflow/workflow.py +2039 -262
  306. {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/METADATA +201 -66
  307. agno-2.3.13.dist-info/RECORD +613 -0
  308. agno/tools/googlesearch.py +0 -98
  309. agno/tools/mcp.py +0 -679
  310. agno/tools/memori.py +0 -339
  311. agno-2.1.2.dist-info/RECORD +0 -543
  312. {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +0 -0
  313. {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/licenses/LICENSE +0 -0
  314. {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
agno/models/openai/chat.py (+104 -29)
@@ -7,13 +7,15 @@ from uuid import uuid4
 import httpx
 from pydantic import BaseModel
 
-from agno.exceptions import ModelProviderError
+from agno.exceptions import ModelAuthenticationError, ModelProviderError
 from agno.media import Audio
 from agno.models.base import Model
 from agno.models.message import Message
 from agno.models.metrics import Metrics
 from agno.models.response import ModelResponse
 from agno.run.agent import RunOutput
+from agno.run.team import TeamRunOutput
+from agno.utils.http import get_default_async_client, get_default_sync_client
 from agno.utils.log import log_debug, log_error, log_warning
 from agno.utils.openai import _format_file_for_message, audio_to_message, images_to_message
 from agno.utils.reasoning import extract_thinking_content
@@ -64,6 +66,7 @@ class OpenAIChat(Model):
     user: Optional[str] = None
     top_p: Optional[float] = None
     service_tier: Optional[str] = None  # "auto" | "default" | "flex" | "priority", defaults to "auto" when not set
+    strict_output: bool = True  # When True, guarantees schema adherence for structured outputs. When False, attempts to follow schema as a guide but may occasionally deviate
     extra_headers: Optional[Any] = None
     extra_query: Optional[Any] = None
     extra_body: Optional[Any] = None
@@ -81,6 +84,10 @@ class OpenAIChat(Model):
     http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
+    # Cached clients to avoid recreating them on every request
+    client: Optional[OpenAIClient] = None
+    async_client: Optional[AsyncOpenAIClient] = None
+
     # The role to map the message role to.
     default_role_map = {
         "system": "developer",
@@ -95,7 +102,10 @@ class OpenAIChat(Model):
         if not self.api_key:
             self.api_key = getenv("OPENAI_API_KEY")
             if not self.api_key:
-                log_error("OPENAI_API_KEY not set. Please set the OPENAI_API_KEY environment variable.")
+                raise ModelAuthenticationError(
+                    message="OPENAI_API_KEY not set. Please set the OPENAI_API_KEY environment variable.",
+                    model_name=self.name,
+                )
 
         # Define base client params
         base_params = {
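
Note: with this hunk, a missing OPENAI_API_KEY now raises ModelAuthenticationError (imported above) instead of only being logged. A minimal sketch of handling it; the import path for OpenAIChat and the model id are assumptions for illustration, not shown in this diff:

    from agno.exceptions import ModelAuthenticationError
    from agno.models.openai import OpenAIChat  # assumed public import path

    model = OpenAIChat(id="gpt-4o-mini")  # illustrative model id

    try:
        # get_client() calls _get_client_params(), which now raises when no key is found.
        model.get_client()
    except ModelAuthenticationError as e:
        print(f"OpenAI authentication failed: {e}")
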
@@ -118,49 +128,68 @@ class OpenAIChat(Model):
 
     def get_client(self) -> OpenAIClient:
         """
-        Returns an OpenAI client.
+        Returns an OpenAI client. Caches the client to avoid recreating it on every request.
 
         Returns:
             OpenAIClient: An instance of the OpenAI client.
         """
+        # Return cached client if it exists and is not closed
+        if self.client is not None and not self.client.is_closed():
+            return self.client
+
+        log_debug(f"Creating new sync OpenAI client for model {self.id}")
         client_params: Dict[str, Any] = self._get_client_params()
         if self.http_client:
             if isinstance(self.http_client, httpx.Client):
                 client_params["http_client"] = self.http_client
             else:
-                log_warning("http_client is not an instance of httpx.Client.")
-        return OpenAIClient(**client_params)
+                log_warning("http_client is not an instance of httpx.Client. Using default global httpx.Client.")
+                # Use global sync client when user http_client is invalid
+                client_params["http_client"] = get_default_sync_client()
+        else:
+            # Use global sync client when no custom http_client is provided
+            client_params["http_client"] = get_default_sync_client()
+
+        # Create and cache the client
+        self.client = OpenAIClient(**client_params)
+        return self.client
 
     def get_async_client(self) -> AsyncOpenAIClient:
         """
-        Returns an asynchronous OpenAI client.
+        Returns an asynchronous OpenAI client. Caches the client to avoid recreating it on every request.
 
         Returns:
             AsyncOpenAIClient: An instance of the asynchronous OpenAI client.
         """
+        # Return cached client if it exists and is not closed
+        if self.async_client is not None and not self.async_client.is_closed():
+            return self.async_client
+
+        log_debug(f"Creating new async OpenAI client for model {self.id}")
         client_params: Dict[str, Any] = self._get_client_params()
         if self.http_client:
             if isinstance(self.http_client, httpx.AsyncClient):
                 client_params["http_client"] = self.http_client
             else:
-                log_warning("http_client is not an instance of httpx.AsyncClient. Using default httpx.AsyncClient.")
-                # Create a new async HTTP client with custom limits
-                client_params["http_client"] = httpx.AsyncClient(
-                    limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
+                log_warning(
+                    "http_client is not an instance of httpx.AsyncClient. Using default global httpx.AsyncClient."
                 )
+                # Use global async client when user http_client is invalid
+                client_params["http_client"] = get_default_async_client()
         else:
-            # Create a new async HTTP client with custom limits
-            client_params["http_client"] = httpx.AsyncClient(
-                limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
-            )
-        return AsyncOpenAIClient(**client_params)
+            # Use global async client when no custom http_client is provided
+            client_params["http_client"] = get_default_async_client()
+
+        # Create and cache the client
+        self.async_client = AsyncOpenAIClient(**client_params)
+        return self.async_client
 
     def get_request_params(
         self,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
+        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
     ) -> Dict[str, Any]:
         """
         Returns keyword arguments for API requests.
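
Note: both client getters now fall back to the shared httpx clients from agno.utils.http and cache the constructed OpenAI client on the model. A minimal sketch of what that means for callers, assuming the usual public import path for OpenAIChat and that OPENAI_API_KEY is set (assumptions, not shown in this diff):

    import httpx

    from agno.models.openai import OpenAIChat  # assumed public import path

    # A valid custom httpx.Client is still honored; anything else now falls back to the
    # shared client from get_default_sync_client() instead of being silently ignored.
    model = OpenAIChat(id="gpt-4o-mini", http_client=httpx.Client(timeout=30.0))

    first = model.get_client()
    second = model.get_client()
    assert first is second  # the OpenAI client is cached and reused while it remains open
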
@@ -206,7 +235,7 @@ class OpenAIChat(Model):
                 "json_schema": {
                     "name": response_format.__name__,
                     "schema": schema,
-                    "strict": True,
+                    "strict": self.strict_output,
                 },
             }
         else:
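
Note: the previously hard-coded "strict": True is now driven by the strict_output field added at the top of the class. A minimal sketch of opting out of strict schema adherence, assuming the usual public import path (an assumption, not shown in this diff):

    from agno.models.openai import OpenAIChat  # assumed public import path

    # strict_output=True (default): structured-output schemas are sent with "strict": true,
    # and the API guarantees schema adherence.
    # strict_output=False: the schema is sent with "strict": false and acts as a guide,
    # so responses may occasionally deviate from it.
    model = OpenAIChat(id="gpt-4o-mini", strict_output=False)
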
@@ -276,19 +305,22 @@ class OpenAIChat(Model):
         cleaned_dict = {k: v for k, v in model_dict.items() if v is not None}
         return cleaned_dict
 
-    def _format_message(self, message: Message) -> Dict[str, Any]:
+    def _format_message(self, message: Message, compress_tool_results: bool = False) -> Dict[str, Any]:
         """
         Format a message into the format expected by OpenAI.
 
         Args:
             message (Message): The message to format.
+            compress_tool_results: Whether to compress tool results.
 
         Returns:
             Dict[str, Any]: The formatted message.
         """
+        tool_result = message.get_content(use_compressed_content=compress_tool_results)
+
         message_dict: Dict[str, Any] = {
             "role": self.role_map[message.role] if self.role_map else self.default_role_map[message.role],
-            "content": message.content,
+            "content": tool_result,
             "name": message.name,
             "tool_call_id": message.tool_call_id,
             "tool_calls": message.tool_calls,
@@ -347,7 +379,8 @@ class OpenAIChat(Model):
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
+        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
+        compress_tool_results: bool = False,
     ) -> ModelResponse:
         """
         Send a chat completion request to the OpenAI API and parse the response.
@@ -358,6 +391,7 @@ class OpenAIChat(Model):
             response_format (Optional[Union[Dict, Type[BaseModel]]]): The response format to use.
             tools (Optional[List[Dict[str, Any]]]): The tools to use.
             tool_choice (Optional[Union[str, Dict[str, Any]]]): The tool choice to use.
+            compress_tool_results: Whether to compress tool results.
 
         Returns:
             ModelResponse: The chat completion response from the API.
@@ -370,7 +404,7 @@ class OpenAIChat(Model):
 
             provider_response = self.get_client().chat.completions.create(
                 model=self.id,
-                messages=[self._format_message(m) for m in messages],  # type: ignore
+                messages=[self._format_message(m, compress_tool_results) for m in messages],  # type: ignore
                 **self.get_request_params(
                     response_format=response_format, tools=tools, tool_choice=tool_choice, run_response=run_response
                 ),
@@ -416,6 +450,9 @@ class OpenAIChat(Model):
                 model_name=self.name,
                 model_id=self.id,
             ) from e
+        except ModelAuthenticationError as e:
+            log_error(f"Model authentication error from OpenAI API: {e}")
+            raise e
         except Exception as e:
             log_error(f"Error from OpenAI API: {e}")
             raise ModelProviderError(message=str(e), model_name=self.name, model_id=self.id) from e
@@ -427,7 +464,8 @@ class OpenAIChat(Model):
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
+        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
+        compress_tool_results: bool = False,
     ) -> ModelResponse:
         """
         Sends an asynchronous chat completion request to the OpenAI API.
@@ -438,6 +476,7 @@ class OpenAIChat(Model):
             response_format (Optional[Union[Dict, Type[BaseModel]]]): The response format to use.
             tools (Optional[List[Dict[str, Any]]]): The tools to use.
             tool_choice (Optional[Union[str, Dict[str, Any]]]): The tool choice to use.
+            compress_tool_results: Whether to compress tool results.
 
         Returns:
             ModelResponse: The chat completion response from the API.
@@ -449,7 +488,7 @@ class OpenAIChat(Model):
             assistant_message.metrics.start_timer()
             response = await self.get_async_client().chat.completions.create(
                 model=self.id,
-                messages=[self._format_message(m) for m in messages],  # type: ignore
+                messages=[self._format_message(m, compress_tool_results) for m in messages],  # type: ignore
                 **self.get_request_params(
                     response_format=response_format, tools=tools, tool_choice=tool_choice, run_response=run_response
                 ),
@@ -495,6 +534,9 @@ class OpenAIChat(Model):
                 model_name=self.name,
                 model_id=self.id,
             ) from e
+        except ModelAuthenticationError as e:
+            log_error(f"Model authentication error from OpenAI API: {e}")
+            raise e
         except Exception as e:
             log_error(f"Error from OpenAI API: {e}")
             raise ModelProviderError(message=str(e), model_name=self.name, model_id=self.id) from e
@@ -506,13 +548,15 @@ class OpenAIChat(Model):
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
+        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
+        compress_tool_results: bool = False,
     ) -> Iterator[ModelResponse]:
         """
         Send a streaming chat completion request to the OpenAI API.
 
         Args:
             messages (List[Message]): A list of messages to send to the model.
+            compress_tool_results: Whether to compress tool results.
 
         Returns:
             Iterator[ModelResponse]: An iterator of model responses.
@@ -526,7 +570,7 @@ class OpenAIChat(Model):
 
             for chunk in self.get_client().chat.completions.create(
                 model=self.id,
-                messages=[self._format_message(m) for m in messages],  # type: ignore
+                messages=[self._format_message(m, compress_tool_results) for m in messages],  # type: ignore
                 stream=True,
                 stream_options={"include_usage": True},
                 **self.get_request_params(
@@ -571,6 +615,9 @@ class OpenAIChat(Model):
                 model_name=self.name,
                 model_id=self.id,
             ) from e
+        except ModelAuthenticationError as e:
+            log_error(f"Model authentication error from OpenAI API: {e}")
+            raise e
         except Exception as e:
             log_error(f"Error from OpenAI API: {e}")
             raise ModelProviderError(message=str(e), model_name=self.name, model_id=self.id) from e
@@ -582,13 +629,15 @@ class OpenAIChat(Model):
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
+        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
+        compress_tool_results: bool = False,
    ) -> AsyncIterator[ModelResponse]:
         """
         Sends an asynchronous streaming chat completion request to the OpenAI API.
 
         Args:
             messages (List[Message]): A list of messages to send to the model.
+            compress_tool_results: Whether to compress tool results.
 
         Returns:
             Any: An asynchronous iterator of model responses.
@@ -602,7 +651,7 @@ class OpenAIChat(Model):
 
             async_stream = await self.get_async_client().chat.completions.create(
                 model=self.id,
-                messages=[self._format_message(m) for m in messages],  # type: ignore
+                messages=[self._format_message(m, compress_tool_results) for m in messages],  # type: ignore
                 stream=True,
                 stream_options={"include_usage": True},
                 **self.get_request_params(
@@ -649,6 +698,9 @@ class OpenAIChat(Model):
                 model_name=self.name,
                 model_id=self.id,
             ) from e
+        except ModelAuthenticationError as e:
+            log_error(f"Model authentication error from OpenAI API: {e}")
+            raise e
         except Exception as e:
             log_error(f"Error from OpenAI API: {e}")
             raise ModelProviderError(message=str(e), model_name=self.name, model_id=self.id) from e
@@ -716,7 +768,6 @@ class OpenAIChat(Model):
         # Add role
         if response_message.role is not None:
             model_response.role = response_message.role
-
         # Add content
         if response_message.content is not None:
             model_response.content = response_message.content
@@ -762,10 +813,22 @@ class OpenAIChat(Model):
 
         if hasattr(response_message, "reasoning_content") and response_message.reasoning_content is not None:  # type: ignore
             model_response.reasoning_content = response_message.reasoning_content  # type: ignore
+        elif hasattr(response_message, "reasoning") and response_message.reasoning is not None:  # type: ignore
+            model_response.reasoning_content = response_message.reasoning  # type: ignore
 
         if response.usage is not None:
             model_response.response_usage = self._get_metrics(response.usage)
 
+        if model_response.provider_data is None:
+            model_response.provider_data = {}
+
+        if response.id:
+            model_response.provider_data["id"] = response.id
+        if response.system_fingerprint:
+            model_response.provider_data["system_fingerprint"] = response.system_fingerprint
+        if response.model_extra:
+            model_response.provider_data["model_extra"] = response.model_extra
+
         return model_response
 
     def _parse_provider_response_delta(self, response_delta: ChatCompletionChunk) -> ModelResponse:
@@ -782,18 +845,30 @@ class OpenAIChat(Model):
 
         if response_delta.choices and len(response_delta.choices) > 0:
             choice_delta: ChoiceDelta = response_delta.choices[0].delta
-
             if choice_delta:
                 # Add content
                 if choice_delta.content is not None:
                     model_response.content = choice_delta.content
 
+                    # We only want to handle these if content is present
+                    if model_response.provider_data is None:
+                        model_response.provider_data = {}
+
+                    if response_delta.id:
+                        model_response.provider_data["id"] = response_delta.id
+                    if response_delta.system_fingerprint:
+                        model_response.provider_data["system_fingerprint"] = response_delta.system_fingerprint
+                    if response_delta.model_extra:
+                        model_response.provider_data["model_extra"] = response_delta.model_extra
+
                 # Add tool calls
                 if choice_delta.tool_calls is not None:
                     model_response.tool_calls = choice_delta.tool_calls  # type: ignore
 
                 if hasattr(choice_delta, "reasoning_content") and choice_delta.reasoning_content is not None:
                     model_response.reasoning_content = choice_delta.reasoning_content
+                elif hasattr(choice_delta, "reasoning") and choice_delta.reasoning is not None:
+                    model_response.reasoning_content = choice_delta.reasoning
 
                 # Add audio if present
                 if hasattr(choice_delta, "audio") and choice_delta.audio is not None: