agno 2.2.5__py3-none-any.whl → 2.2.6__py3-none-any.whl

@@ -63,7 +63,7 @@ class Cerebras(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Any] = None
     default_query: Optional[Any] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # Cerebras clients
@@ -102,12 +102,15 @@ class Cerebras(Model):
         Returns:
             CerebrasClient: An instance of the Cerebras client.
         """
-        if self.client:
+        if self.client and not self.client.is_closed():
             return self.client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client is not None:
-            client_params["http_client"] = self.http_client
+        if self.http_client:
+            if isinstance(self.http_client, httpx.Client):
+                client_params["http_client"] = self.http_client
+            else:
+                log_debug("http_client is not an instance of httpx.Client.")
         self.client = CerebrasClient(**client_params)
         return self.client
 
@@ -118,13 +121,15 @@ class Cerebras(Model):
         Returns:
             AsyncCerebras: An instance of the asynchronous Cerebras client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
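
Taken together, the Cerebras changes above do three things: http_client is widened to accept either a sync or an async httpx client, cached SDK clients are rebuilt once they have been closed, and each getter only honors a client of the matching type. A minimal sketch of the resulting behavior (the model id is illustrative, not taken from this diff):

    import httpx
    from agno.models.cerebras import Cerebras

    # One shared sync connection pool, reused across models.
    shared_http = httpx.Client(timeout=30.0)

    model = Cerebras(id="llama-3.3-70b", http_client=shared_http)  # illustrative id

    sync_client = model.get_client()          # honors shared_http (an httpx.Client)
    async_client = model.get_async_client()   # shared_http is not async, so a default
                                              # httpx.AsyncClient is created and a debug
                                              # message is logged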
agno/models/groq/groq.py CHANGED
@@ -61,7 +61,7 @@ class Groq(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Any] = None
     default_query: Optional[Any] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # Groq clients
@@ -115,18 +115,21 @@ class Groq(Model):
         Returns:
             AsyncGroqClient: An instance of the asynchronous Groq client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
             )
-        return AsyncGroqClient(**client_params)
+        self.async_client = AsyncGroqClient(**client_params)
+        return self.async_client
 
     def get_request_params(
         self,
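
Besides the same type guard, this hunk fixes a caching bug: get_async_client() previously built a fresh AsyncGroqClient on every call and never stored it. A hedged sketch of the new lifecycle, assuming the Groq SDK client exposes the is_closed()/close() surface the diff relies on (model id is illustrative):

    import asyncio
    from agno.models.groq import Groq

    model = Groq(id="llama-3.3-70b-versatile")  # illustrative id

    async def main() -> None:
        first = model.get_async_client()
        second = model.get_async_client()
        assert first is second          # now cached on self.async_client

        await first.close()             # assumed awaitable close(), as in similar SDKs
        third = model.get_async_client()
        assert third is not first       # a closed client is replaced, not reused

    asyncio.run(main())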
agno/models/meta/llama.py CHANGED
@@ -61,7 +61,7 @@ class Llama(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Any] = None
     default_query: Optional[Any] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # OpenAI clients
@@ -104,8 +104,11 @@ class Llama(Model):
             return self.client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client is not None:
-            client_params["http_client"] = self.http_client
+        if self.http_client:
+            if isinstance(self.http_client, httpx.Client):
+                client_params["http_client"] = self.http_client
+            else:
+                log_debug("http_client is not an instance of httpx.Client.")
         self.client = LlamaAPIClient(**client_params)
         return self.client
 
@@ -116,18 +119,21 @@ class Llama(Model):
         Returns:
             AsyncLlamaAPIClient: An instance of the asynchronous Llama client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
             )
-        return AsyncLlamaAPIClient(**client_params)
+        self.async_client = AsyncLlamaAPIClient(**client_params)
+        return self.async_client
 
     def get_request_params(
         self,
@@ -62,6 +62,9 @@ class LlamaOpenAI(OpenAILike):
 
     def get_async_client(self):
         """Override to provide custom httpx client that properly handles redirects"""
+        if self.async_client and not self.async_client.is_closed():
+            return self.async_client
+
         client_params = self._get_client_params()
 
         # Llama gives a 307 redirect error, so we need to set up a custom client to allow redirects
@@ -71,4 +74,5 @@ class LlamaOpenAI(OpenAILike):
             timeout=httpx.Timeout(30.0),
         )
 
-        return AsyncOpenAIClient(**client_params)
+        self.async_client = AsyncOpenAIClient(**client_params)
+        return self.async_client
@@ -81,6 +81,10 @@ class OpenAIChat(Model):
     http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
+    # OpenAI clients
+    client: Optional[OpenAIClient] = None
+    async_client: Optional[AsyncOpenAIClient] = None
+
     # The role to map the message role to.
     default_role_map = {
         "system": "developer",
@@ -123,13 +127,18 @@ class OpenAIChat(Model):
         Returns:
             OpenAIClient: An instance of the OpenAI client.
         """
+        if self.client and not self.client.is_closed():
+            return self.client
+
         client_params: Dict[str, Any] = self._get_client_params()
         if self.http_client:
             if isinstance(self.http_client, httpx.Client):
                 client_params["http_client"] = self.http_client
             else:
-                log_warning("http_client is not an instance of httpx.Client.")
-        return OpenAIClient(**client_params)
+                log_debug("http_client is not an instance of httpx.Client.")
+
+        self.client = OpenAIClient(**client_params)
+        return self.client
 
     def get_async_client(self) -> AsyncOpenAIClient:
         """
@@ -138,22 +147,21 @@ class OpenAIChat(Model):
         Returns:
             AsyncOpenAIClient: An instance of the asynchronous OpenAI client.
         """
+        if self.async_client and not self.async_client.is_closed():
+            return self.async_client
+
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
-            if isinstance(self.http_client, httpx.AsyncClient):
-                client_params["http_client"] = self.http_client
-            else:
-                log_warning("http_client is not an instance of httpx.AsyncClient. Using default httpx.AsyncClient.")
-                # Create a new async HTTP client with custom limits
-                client_params["http_client"] = httpx.AsyncClient(
-                    limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
-                )
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
+            client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
             )
-        return AsyncOpenAIClient(**client_params)
+        self.async_client = AsyncOpenAIClient(**client_params)
+        return self.async_client
 
     def get_request_params(
         self,
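
OpenAIChat previously declared no client/async_client fields of its own and rebuilt the SDK client on every call; both getters now memoize, and the async-vs-sync mismatch is demoted from a warning to a debug log. A short usage sketch (model id and pool limits are illustrative):

    import httpx
    from agno.models.openai import OpenAIChat

    pool = httpx.AsyncClient(
        limits=httpx.Limits(max_connections=100, max_keepalive_connections=20)
    )

    model = OpenAIChat(id="gpt-4o", http_client=pool)  # illustrative id

    a = model.get_async_client()   # wraps `pool`
    b = model.get_async_client()
    assert a is b                  # cached until the underlying client is closed

    # The sync getter skips the async pool (with a debug log) and
    # builds a plain OpenAIClient instead.
    sync_client = model.get_client()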
@@ -66,7 +66,7 @@ class OpenAIResponses(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Dict[str, str]] = None
     default_query: Optional[Dict[str, str]] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # Parameters affecting built-in tools
@@ -148,8 +148,11 @@ class OpenAIResponses(Model):
             return self.client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client is not None:
-            client_params["http_client"] = self.http_client
+        if self.http_client:
+            if isinstance(self.http_client, httpx.Client):
+                client_params["http_client"] = self.http_client
+            else:
+                log_debug("http_client is not an instance of httpx.Client.")
 
         self.client = OpenAI(**client_params)
         return self.client
@@ -161,13 +164,15 @@ class OpenAIResponses(Model):
         Returns:
             AsyncOpenAI: An instance of the asynchronous OpenAI client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
agno/models/utils.py CHANGED
@@ -1,20 +1,266 @@
+from typing import Optional, Union
+
 from agno.models.base import Model
 
 
-# TODO: add all supported models
-def get_model(model_id: str, model_provider: str) -> Model:
-    """Return the right Agno model instance given a pair of model provider and id"""
-    if model_provider == "openai":
-        from agno.models.openai import OpenAIChat
+def _get_model_class(model_id: str, model_provider: str) -> Model:
+    if model_provider == "aimlapi":
+        from agno.models.aimlapi import AIMLAPI
+
+        return AIMLAPI(id=model_id)
 
-        return OpenAIChat(id=model_id)
     elif model_provider == "anthropic":
         from agno.models.anthropic import Claude
 
         return Claude(id=model_id)
-    elif model_provider == "gemini":
+
+    elif model_provider == "aws-bedrock":
+        from agno.models.aws import AwsBedrock
+
+        return AwsBedrock(id=model_id)
+
+    elif model_provider == "aws-claude":
+        from agno.models.aws import Claude as AWSClaude
+
+        return AWSClaude(id=model_id)
+
+    elif model_provider == "azure-ai-foundry":
+        from agno.models.azure import AzureAIFoundry
+
+        return AzureAIFoundry(id=model_id)
+
+    elif model_provider == "azure-openai":
+        from agno.models.azure import AzureOpenAI
+
+        return AzureOpenAI(id=model_id)
+
+    elif model_provider == "cerebras":
+        from agno.models.cerebras import Cerebras
+
+        return Cerebras(id=model_id)
+
+    elif model_provider == "cerebras-openai":
+        from agno.models.cerebras import CerebrasOpenAI
+
+        return CerebrasOpenAI(id=model_id)
+
+    elif model_provider == "cohere":
+        from agno.models.cohere import Cohere
+
+        return Cohere(id=model_id)
+
+    elif model_provider == "cometapi":
+        from agno.models.cometapi import CometAPI
+
+        return CometAPI(id=model_id)
+
+    elif model_provider == "dashscope":
+        from agno.models.dashscope import DashScope
+
+        return DashScope(id=model_id)
+
+    elif model_provider == "deepinfra":
+        from agno.models.deepinfra import DeepInfra
+
+        return DeepInfra(id=model_id)
+
+    elif model_provider == "deepseek":
+        from agno.models.deepseek import DeepSeek
+
+        return DeepSeek(id=model_id)
+
+    elif model_provider == "fireworks":
+        from agno.models.fireworks import Fireworks
+
+        return Fireworks(id=model_id)
+
+    elif model_provider == "google":
         from agno.models.google import Gemini
 
         return Gemini(id=model_id)
+
+    elif model_provider == "groq":
+        from agno.models.groq import Groq
+
+        return Groq(id=model_id)
+
+    elif model_provider == "huggingface":
+        from agno.models.huggingface import HuggingFace
+
+        return HuggingFace(id=model_id)
+
+    elif model_provider == "ibm":
+        from agno.models.ibm import WatsonX
+
+        return WatsonX(id=model_id)
+
+    elif model_provider == "internlm":
+        from agno.models.internlm import InternLM
+
+        return InternLM(id=model_id)
+
+    elif model_provider == "langdb":
+        from agno.models.langdb import LangDB
+
+        return LangDB(id=model_id)
+
+    elif model_provider == "litellm":
+        from agno.models.litellm import LiteLLM
+
+        return LiteLLM(id=model_id)
+
+    elif model_provider == "litellm-openai":
+        from agno.models.litellm import LiteLLMOpenAI
+
+        return LiteLLMOpenAI(id=model_id)
+
+    elif model_provider == "llama-cpp":
+        from agno.models.llama_cpp import LlamaCpp
+
+        return LlamaCpp(id=model_id)
+
+    elif model_provider == "llama-openai":
+        from agno.models.meta import LlamaOpenAI
+
+        return LlamaOpenAI(id=model_id)
+
+    elif model_provider == "lmstudio":
+        from agno.models.lmstudio import LMStudio
+
+        return LMStudio(id=model_id)
+
+    elif model_provider == "meta":
+        from agno.models.meta import Llama
+
+        return Llama(id=model_id)
+
+    elif model_provider == "mistral":
+        from agno.models.mistral import MistralChat
+
+        return MistralChat(id=model_id)
+
+    elif model_provider == "nebius":
+        from agno.models.nebius import Nebius
+
+        return Nebius(id=model_id)
+
+    elif model_provider == "nexus":
+        from agno.models.nexus import Nexus
+
+        return Nexus(id=model_id)
+
+    elif model_provider == "nvidia":
+        from agno.models.nvidia import Nvidia
+
+        return Nvidia(id=model_id)
+
+    elif model_provider == "ollama":
+        from agno.models.ollama import Ollama
+
+        return Ollama(id=model_id)
+
+    elif model_provider == "openai":
+        from agno.models.openai import OpenAIChat
+
+        return OpenAIChat(id=model_id)
+
+    elif model_provider == "openai-responses":
+        from agno.models.openai import OpenAIResponses
+
+        return OpenAIResponses(id=model_id)
+
+    elif model_provider == "openrouter":
+        from agno.models.openrouter import OpenRouter
+
+        return OpenRouter(id=model_id)
+
+    elif model_provider == "perplexity":
+        from agno.models.perplexity import Perplexity
+
+        return Perplexity(id=model_id)
+
+    elif model_provider == "portkey":
+        from agno.models.portkey import Portkey
+
+        return Portkey(id=model_id)
+
+    elif model_provider == "requesty":
+        from agno.models.requesty import Requesty
+
+        return Requesty(id=model_id)
+
+    elif model_provider == "sambanova":
+        from agno.models.sambanova import Sambanova
+
+        return Sambanova(id=model_id)
+
+    elif model_provider == "siliconflow":
+        from agno.models.siliconflow import Siliconflow
+
+        return Siliconflow(id=model_id)
+
+    elif model_provider == "together":
+        from agno.models.together import Together
+
+        return Together(id=model_id)
+
+    elif model_provider == "vercel":
+        from agno.models.vercel import V0
+
+        return V0(id=model_id)
+
+    elif model_provider == "vertexai-claude":
+        from agno.models.vertexai.claude import Claude as VertexAIClaude
+
+        return VertexAIClaude(id=model_id)
+
+    elif model_provider == "vllm":
+        from agno.models.vllm import VLLM
+
+        return VLLM(id=model_id)
+
+    elif model_provider == "xai":
+        from agno.models.xai import xAI
+
+        return xAI(id=model_id)
+
+    else:
+        raise ValueError(f"Model provider '{model_provider}' is not supported.")
+
+
+def _parse_model_string(model_string: str) -> Model:
+    if not model_string or not isinstance(model_string, str):
+        raise ValueError(f"Model string must be a non-empty string, got: {model_string}")
+
+    if ":" not in model_string:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    parts = model_string.split(":", 1)
+    if len(parts) != 2:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    model_provider, model_id = parts
+    model_provider = model_provider.strip().lower()
+    model_id = model_id.strip()
+
+    if not model_provider or not model_id:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    return _get_model_class(model_id, model_provider)
+
+
+def get_model(model: Union[Model, str, None]) -> Optional[Model]:
+    if model is None:
+        return None
+    elif isinstance(model, Model):
+        return model
+    elif isinstance(model, str):
+        return _parse_model_string(model)
     else:
-        raise ValueError(f"Model provider {model_provider} not supported")
+        raise ValueError("Model must be a Model instance, string, or None")
@@ -5,10 +5,7 @@ from typing import Any, Dict, Optional
 from agno.models.anthropic import Claude as AnthropicClaude
 
 try:
-    from anthropic import AnthropicVertex as AnthropicClient
-    from anthropic import (
-        AsyncAnthropicVertex as AsyncAnthropicClient,
-    )
+    from anthropic import AnthropicVertex, AsyncAnthropicVertex
 except ImportError as e:
     raise ImportError("`anthropic` not installed. Please install it with `pip install anthropic`") from e
 
@@ -25,15 +22,14 @@ class Claude(AnthropicClaude):
     name: str = "Claude"
     provider: str = "VertexAI"
 
+    client: Optional[AnthropicVertex] = None  # type: ignore
+    async_client: Optional[AsyncAnthropicVertex] = None  # type: ignore
+
     # Client parameters
     region: Optional[str] = None
     project_id: Optional[str] = None
     base_url: Optional[str] = None
 
-    # Anthropic clients
-    client: Optional[AnthropicClient] = None
-    async_client: Optional[AsyncAnthropicClient] = None
-
     def _get_client_params(self) -> Dict[str, Any]:
         client_params: Dict[str, Any] = {}
 
@@ -51,7 +47,7 @@ class Claude(AnthropicClaude):
             client_params["default_headers"] = self.default_headers
         return client_params
 
-    def get_client(self) -> AnthropicClient:
+    def get_client(self):
         """
         Returns an instance of the Anthropic client.
         """
@@ -59,16 +55,16 @@ class Claude(AnthropicClaude):
             return self.client
 
         _client_params = self._get_client_params()
-        self.client = AnthropicClient(**_client_params)
+        self.client = AnthropicVertex(**_client_params)
         return self.client
 
-    def get_async_client(self) -> AsyncAnthropicClient:
+    def get_async_client(self):
         """
         Returns an instance of the async Anthropic client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         _client_params = self._get_client_params()
-        self.async_client = AsyncAnthropicClient(**_client_params)
+        self.async_client = AsyncAnthropicVertex(**_client_params)
         return self.async_client
@@ -356,10 +356,10 @@ def attach_routes(
     ):
         default_model = deepcopy(agent.model)
         if eval_run_input.model_id != agent.model.id or eval_run_input.model_provider != agent.model.provider:
-            model = get_model(
-                model_id=eval_run_input.model_id.lower(),
-                model_provider=eval_run_input.model_provider.lower(),
-            )
+            model_provider = eval_run_input.model_provider.lower()
+            model_id = eval_run_input.model_id.lower()
+            model_string = f"{model_provider}:{model_id}"
+            model = get_model(model_string)
             agent.model = model
 
         team = None
@@ -378,10 +378,10 @@ def attach_routes(
     ):
         default_model = deepcopy(team.model)
         if eval_run_input.model_id != team.model.id or eval_run_input.model_provider != team.model.provider:
-            model = get_model(
-                model_id=eval_run_input.model_id.lower(),
-                model_provider=eval_run_input.model_provider.lower(),
-            )
+            model_provider = eval_run_input.model_provider.lower()
+            model_id = eval_run_input.model_id.lower()
+            model_string = f"{model_provider}:{model_id}"
+            model = get_model(model_string)
             team.model = model
 
         agent = None
@@ -33,6 +33,7 @@ async def run_accuracy_eval(
         additional_context=eval_run_input.additional_context,
         num_iterations=eval_run_input.num_iterations or 1,
         name=eval_run_input.name,
+        model=default_model,
     )
 
     result = accuracy_eval.run(print_results=False, print_summary=False)