agno 2.2.5__py3-none-any.whl → 2.2.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (57)
  1. agno/agent/agent.py +500 -423
  2. agno/api/os.py +1 -1
  3. agno/culture/manager.py +12 -8
  4. agno/guardrails/prompt_injection.py +1 -0
  5. agno/knowledge/chunking/agentic.py +6 -2
  6. agno/knowledge/embedder/vllm.py +262 -0
  7. agno/knowledge/knowledge.py +37 -5
  8. agno/memory/manager.py +9 -4
  9. agno/models/anthropic/claude.py +1 -2
  10. agno/models/azure/ai_foundry.py +31 -14
  11. agno/models/azure/openai_chat.py +12 -4
  12. agno/models/base.py +106 -65
  13. agno/models/cerebras/cerebras.py +11 -6
  14. agno/models/groq/groq.py +7 -4
  15. agno/models/meta/llama.py +12 -6
  16. agno/models/meta/llama_openai.py +5 -1
  17. agno/models/openai/chat.py +26 -17
  18. agno/models/openai/responses.py +11 -63
  19. agno/models/requesty/requesty.py +5 -2
  20. agno/models/utils.py +254 -8
  21. agno/models/vertexai/claude.py +9 -13
  22. agno/os/app.py +13 -12
  23. agno/os/routers/evals/evals.py +8 -8
  24. agno/os/routers/evals/utils.py +1 -0
  25. agno/os/schema.py +56 -38
  26. agno/os/utils.py +27 -0
  27. agno/run/__init__.py +6 -0
  28. agno/run/agent.py +5 -0
  29. agno/run/base.py +18 -1
  30. agno/run/team.py +13 -9
  31. agno/run/workflow.py +39 -0
  32. agno/session/summary.py +8 -2
  33. agno/session/workflow.py +4 -3
  34. agno/team/team.py +302 -369
  35. agno/tools/exa.py +21 -16
  36. agno/tools/file.py +153 -25
  37. agno/tools/function.py +98 -17
  38. agno/tools/mcp/mcp.py +8 -1
  39. agno/tools/notion.py +204 -0
  40. agno/utils/agent.py +78 -0
  41. agno/utils/events.py +2 -0
  42. agno/utils/hooks.py +1 -1
  43. agno/utils/models/claude.py +25 -8
  44. agno/utils/print_response/workflow.py +115 -16
  45. agno/vectordb/__init__.py +2 -1
  46. agno/vectordb/milvus/milvus.py +5 -0
  47. agno/vectordb/redis/__init__.py +5 -0
  48. agno/vectordb/redis/redisdb.py +687 -0
  49. agno/workflow/__init__.py +2 -0
  50. agno/workflow/agent.py +299 -0
  51. agno/workflow/step.py +13 -2
  52. agno/workflow/workflow.py +969 -72
  53. {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/METADATA +10 -3
  54. {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/RECORD +57 -52
  55. {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/WHEEL +0 -0
  56. {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/licenses/LICENSE +0 -0
  57. {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/top_level.txt +0 -0
agno/models/openai/responses.py CHANGED
@@ -8,7 +8,7 @@ from typing_extensions import Literal
 
 from agno.exceptions import ModelProviderError
 from agno.media import File
-from agno.models.base import MessageData, Model
+from agno.models.base import Model
 from agno.models.message import Citations, Message, UrlCitation
 from agno.models.metrics import Metrics
 from agno.models.response import ModelResponse
@@ -66,7 +66,7 @@ class OpenAIResponses(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Dict[str, str]] = None
     default_query: Optional[Dict[str, str]] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # Parameters affecting built-in tools
@@ -148,8 +148,11 @@ class OpenAIResponses(Model):
             return self.client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client is not None:
-            client_params["http_client"] = self.http_client
+        if self.http_client:
+            if isinstance(self.http_client, httpx.Client):
+                client_params["http_client"] = self.http_client
+            else:
+                log_debug("http_client is not an instance of httpx.Client.")
 
         self.client = OpenAI(**client_params)
         return self.client
@@ -161,13 +164,15 @@ class OpenAIResponses(Model):
         Returns:
             AsyncOpenAI: An instance of the asynchronous OpenAI client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
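
Taken together, the three hunks above let `http_client` carry either an `httpx.Client` or an `httpx.AsyncClient`, validate the type at client-construction time, and rebuild the async client once it has been closed. A minimal usage sketch (the model id and connection limits are illustrative, not from this diff):

```python
import httpx

from agno.models.openai import OpenAIResponses

# One AsyncClient shared across models; the new is_closed() check means
# it is reused until someone closes it, then rebuilt automatically.
shared_http = httpx.AsyncClient(
    limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
    timeout=httpx.Timeout(30.0),
)

# The async path will adopt this client. The sync path would log a debug
# message and fall back to a default client, since this is not an httpx.Client.
model = OpenAIResponses(id="gpt-4o", http_client=shared_http)
```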
@@ -805,63 +810,6 @@ class OpenAIResponses(Model):
             _fc_message.tool_call_id = tool_call_ids[_fc_message_index]
             messages.append(_fc_message)
 
-    def process_response_stream(
-        self,
-        messages: List[Message],
-        assistant_message: Message,
-        stream_data: MessageData,
-        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
-    ) -> Iterator[ModelResponse]:
-        """Process the synchronous response stream."""
-        for model_response_delta in self.invoke_stream(
-            messages=messages,
-            assistant_message=assistant_message,
-            tools=tools,
-            response_format=response_format,
-            tool_choice=tool_choice,
-            run_response=run_response,
-        ):
-            yield from self._populate_stream_data_and_assistant_message(
-                stream_data=stream_data,
-                assistant_message=assistant_message,
-                model_response_delta=model_response_delta,
-            )
-
-        # Add final metrics to assistant message
-        self._populate_assistant_message(assistant_message=assistant_message, provider_response=model_response_delta)
-
-    async def aprocess_response_stream(
-        self,
-        messages: List[Message],
-        assistant_message: Message,
-        stream_data: MessageData,
-        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
-    ) -> AsyncIterator[ModelResponse]:
-        """Process the asynchronous response stream."""
-        async for model_response_delta in self.ainvoke_stream(
-            messages=messages,
-            assistant_message=assistant_message,
-            tools=tools,
-            response_format=response_format,
-            tool_choice=tool_choice,
-            run_response=run_response,
-        ):
-            for model_response in self._populate_stream_data_and_assistant_message(
-                stream_data=stream_data,
-                assistant_message=assistant_message,
-                model_response_delta=model_response_delta,
-            ):
-                yield model_response
-
-        # Add final metrics to assistant message
-        self._populate_assistant_message(assistant_message=assistant_message, provider_response=model_response_delta)
-
     def _parse_provider_response(self, response: Response, **kwargs) -> ModelResponse:
         """
         Parse the OpenAI response into a ModelResponse.
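
These two overrides duplicated the generic streaming loop, so they are deleted and streaming presumably flows through the shared implementation in `agno.models.base.Model` (note the matching removal of the `MessageData` import in the first hunk, and the +106/−65 churn in `agno/models/base.py`). A self-contained sketch of the template-method pattern this relies on; the classes below are illustrative, not the actual agno code:

```python
from typing import Iterator


class BaseModelSketch:
    """Owns the generic stream-processing loop (like agno's Model base class)."""

    def invoke_stream(self) -> Iterator[str]:
        raise NotImplementedError  # provider-specific

    def process_response_stream(self) -> Iterator[str]:
        # The shared loop that OpenAIResponses previously re-implemented.
        for delta in self.invoke_stream():
            yield delta.upper()  # stand-in for populating stream data


class ProviderSketch(BaseModelSketch):
    """After the refactor, only the invoke step is provider-specific."""

    def invoke_stream(self) -> Iterator[str]:
        yield from ("hello", "world")  # stand-in for provider deltas


for chunk in ProviderSketch().process_response_stream():
    print(chunk)  # HELLO, WORLD
```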
agno/models/requesty/requesty.py CHANGED
@@ -6,6 +6,7 @@ from pydantic import BaseModel
 
 from agno.models.openai.like import OpenAILike
 from agno.run.agent import RunOutput
+from agno.run.team import TeamRunOutput
 
 
 @dataclass
@@ -34,9 +35,11 @@ class Requesty(OpenAILike):
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
+        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
     ) -> Dict[str, Any]:
-        params = super().get_request_params(response_format=response_format, tools=tools, tool_choice=tool_choice)
+        params = super().get_request_params(
+            response_format=response_format, tools=tools, tool_choice=tool_choice, run_response=run_response
+        )
 
         if "extra_body" not in params:
             params["extra_body"] = {}
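
The signature is widened to accept team runs and, more importantly, `run_response` is now forwarded to the parent instead of being silently dropped. A hypothetical parent/child pair showing the bug class this fixes (all names below are invented for illustration):

```python
from typing import Any, Dict, Optional


class ParentModelSketch:
    def get_request_params(self, run_response: Optional[Any] = None) -> Dict[str, Any]:
        params: Dict[str, Any] = {}
        if run_response is not None:
            # A parent that derives request params from the active run.
            params["metadata"] = {"run_id": getattr(run_response, "run_id", None)}
        return params


class ChildModelSketch(ParentModelSketch):
    def get_request_params(self, run_response: Optional[Any] = None) -> Dict[str, Any]:
        # Forward run_response, mirroring the Requesty fix: omitting it here
        # would make the parent behave as if there were no active run.
        params = super().get_request_params(run_response=run_response)
        params.setdefault("extra_body", {})
        return params
```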
agno/models/utils.py CHANGED
@@ -1,20 +1,266 @@
+from typing import Optional, Union
+
 from agno.models.base import Model
 
 
-# TODO: add all supported models
-def get_model(model_id: str, model_provider: str) -> Model:
-    """Return the right Agno model instance given a pair of model provider and id"""
-    if model_provider == "openai":
-        from agno.models.openai import OpenAIChat
+def _get_model_class(model_id: str, model_provider: str) -> Model:
+    if model_provider == "aimlapi":
+        from agno.models.aimlapi import AIMLAPI
+
+        return AIMLAPI(id=model_id)
 
-        return OpenAIChat(id=model_id)
     elif model_provider == "anthropic":
         from agno.models.anthropic import Claude
 
         return Claude(id=model_id)
-    elif model_provider == "gemini":
+
+    elif model_provider == "aws-bedrock":
+        from agno.models.aws import AwsBedrock
+
+        return AwsBedrock(id=model_id)
+
+    elif model_provider == "aws-claude":
+        from agno.models.aws import Claude as AWSClaude
+
+        return AWSClaude(id=model_id)
+
+    elif model_provider == "azure-ai-foundry":
+        from agno.models.azure import AzureAIFoundry
+
+        return AzureAIFoundry(id=model_id)
+
+    elif model_provider == "azure-openai":
+        from agno.models.azure import AzureOpenAI
+
+        return AzureOpenAI(id=model_id)
+
+    elif model_provider == "cerebras":
+        from agno.models.cerebras import Cerebras
+
+        return Cerebras(id=model_id)
+
+    elif model_provider == "cerebras-openai":
+        from agno.models.cerebras import CerebrasOpenAI
+
+        return CerebrasOpenAI(id=model_id)
+
+    elif model_provider == "cohere":
+        from agno.models.cohere import Cohere
+
+        return Cohere(id=model_id)
+
+    elif model_provider == "cometapi":
+        from agno.models.cometapi import CometAPI
+
+        return CometAPI(id=model_id)
+
+    elif model_provider == "dashscope":
+        from agno.models.dashscope import DashScope
+
+        return DashScope(id=model_id)
+
+    elif model_provider == "deepinfra":
+        from agno.models.deepinfra import DeepInfra
+
+        return DeepInfra(id=model_id)
+
+    elif model_provider == "deepseek":
+        from agno.models.deepseek import DeepSeek
+
+        return DeepSeek(id=model_id)
+
+    elif model_provider == "fireworks":
+        from agno.models.fireworks import Fireworks
+
+        return Fireworks(id=model_id)
+
+    elif model_provider == "google":
         from agno.models.google import Gemini
 
         return Gemini(id=model_id)
+
+    elif model_provider == "groq":
+        from agno.models.groq import Groq
+
+        return Groq(id=model_id)
+
+    elif model_provider == "huggingface":
+        from agno.models.huggingface import HuggingFace
+
+        return HuggingFace(id=model_id)
+
+    elif model_provider == "ibm":
+        from agno.models.ibm import WatsonX
+
+        return WatsonX(id=model_id)
+
+    elif model_provider == "internlm":
+        from agno.models.internlm import InternLM
+
+        return InternLM(id=model_id)
+
+    elif model_provider == "langdb":
+        from agno.models.langdb import LangDB
+
+        return LangDB(id=model_id)
+
+    elif model_provider == "litellm":
+        from agno.models.litellm import LiteLLM
+
+        return LiteLLM(id=model_id)
+
+    elif model_provider == "litellm-openai":
+        from agno.models.litellm import LiteLLMOpenAI
+
+        return LiteLLMOpenAI(id=model_id)
+
+    elif model_provider == "llama-cpp":
+        from agno.models.llama_cpp import LlamaCpp
+
+        return LlamaCpp(id=model_id)
+
+    elif model_provider == "llama-openai":
+        from agno.models.meta import LlamaOpenAI
+
+        return LlamaOpenAI(id=model_id)
+
+    elif model_provider == "lmstudio":
+        from agno.models.lmstudio import LMStudio
+
+        return LMStudio(id=model_id)
+
+    elif model_provider == "meta":
+        from agno.models.meta import Llama
+
+        return Llama(id=model_id)
+
+    elif model_provider == "mistral":
+        from agno.models.mistral import MistralChat
+
+        return MistralChat(id=model_id)
+
+    elif model_provider == "nebius":
+        from agno.models.nebius import Nebius
+
+        return Nebius(id=model_id)
+
+    elif model_provider == "nexus":
+        from agno.models.nexus import Nexus
+
+        return Nexus(id=model_id)
+
+    elif model_provider == "nvidia":
+        from agno.models.nvidia import Nvidia
+
+        return Nvidia(id=model_id)
+
+    elif model_provider == "ollama":
+        from agno.models.ollama import Ollama
+
+        return Ollama(id=model_id)
+
+    elif model_provider == "openai":
+        from agno.models.openai import OpenAIChat
+
+        return OpenAIChat(id=model_id)
+
+    elif model_provider == "openai-responses":
+        from agno.models.openai import OpenAIResponses
+
+        return OpenAIResponses(id=model_id)
+
+    elif model_provider == "openrouter":
+        from agno.models.openrouter import OpenRouter
+
+        return OpenRouter(id=model_id)
+
+    elif model_provider == "perplexity":
+        from agno.models.perplexity import Perplexity
+
+        return Perplexity(id=model_id)
+
+    elif model_provider == "portkey":
+        from agno.models.portkey import Portkey
+
+        return Portkey(id=model_id)
+
+    elif model_provider == "requesty":
+        from agno.models.requesty import Requesty
+
+        return Requesty(id=model_id)
+
+    elif model_provider == "sambanova":
+        from agno.models.sambanova import Sambanova
+
+        return Sambanova(id=model_id)
+
+    elif model_provider == "siliconflow":
+        from agno.models.siliconflow import Siliconflow
+
+        return Siliconflow(id=model_id)
+
+    elif model_provider == "together":
+        from agno.models.together import Together
+
+        return Together(id=model_id)
+
+    elif model_provider == "vercel":
+        from agno.models.vercel import V0
+
+        return V0(id=model_id)
+
+    elif model_provider == "vertexai-claude":
+        from agno.models.vertexai.claude import Claude as VertexAIClaude
+
+        return VertexAIClaude(id=model_id)
+
+    elif model_provider == "vllm":
+        from agno.models.vllm import VLLM
+
+        return VLLM(id=model_id)
+
+    elif model_provider == "xai":
+        from agno.models.xai import xAI
+
+        return xAI(id=model_id)
+
+    else:
+        raise ValueError(f"Model provider '{model_provider}' is not supported.")
+
+
+def _parse_model_string(model_string: str) -> Model:
+    if not model_string or not isinstance(model_string, str):
+        raise ValueError(f"Model string must be a non-empty string, got: {model_string}")
+
+    if ":" not in model_string:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    parts = model_string.split(":", 1)
+    if len(parts) != 2:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    model_provider, model_id = parts
+    model_provider = model_provider.strip().lower()
+    model_id = model_id.strip()
+
+    if not model_provider or not model_id:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    return _get_model_class(model_id, model_provider)
+
+
+def get_model(model: Union[Model, str, None]) -> Optional[Model]:
+    if model is None:
+        return None
+    elif isinstance(model, Model):
+        return model
+    elif isinstance(model, str):
+        return _parse_model_string(model)
     else:
-        raise ValueError(f"Model provider {model_provider} not supported")
+        raise ValueError("Model must be a Model instance, string, or None")
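
`get_model` is now a polymorphic entry point: `Model` instances pass through, `'<provider>:<model_id>'` strings are parsed via `_parse_model_string`, and `None` is returned as-is. A quick sketch of the accepted inputs (model ids are illustrative):

```python
from agno.models.anthropic import Claude
from agno.models.utils import get_model

# String form: "<provider>:<model_id>"; the provider part is case-insensitive.
model = get_model("openai:gpt-4o")

# Model instances pass through unchanged.
same = get_model(Claude(id="claude-sonnet-4-5"))

# None passes through; a string without the provider prefix raises.
assert get_model(None) is None
try:
    get_model("gpt-4o")
except ValueError as exc:
    print(exc)  # Invalid model string format ...
```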
agno/models/vertexai/claude.py CHANGED
@@ -5,10 +5,7 @@ from typing import Any, Dict, Optional
 from agno.models.anthropic import Claude as AnthropicClaude
 
 try:
-    from anthropic import AnthropicVertex as AnthropicClient
-    from anthropic import (
-        AsyncAnthropicVertex as AsyncAnthropicClient,
-    )
+    from anthropic import AnthropicVertex, AsyncAnthropicVertex
 except ImportError as e:
     raise ImportError("`anthropic` not installed. Please install it with `pip install anthropic`") from e
 
@@ -25,15 +22,14 @@ class Claude(AnthropicClaude):
     name: str = "Claude"
     provider: str = "VertexAI"
 
+    client: Optional[AnthropicVertex] = None  # type: ignore
+    async_client: Optional[AsyncAnthropicVertex] = None  # type: ignore
+
     # Client parameters
     region: Optional[str] = None
     project_id: Optional[str] = None
     base_url: Optional[str] = None
 
-    # Anthropic clients
-    client: Optional[AnthropicClient] = None
-    async_client: Optional[AsyncAnthropicClient] = None
-
     def _get_client_params(self) -> Dict[str, Any]:
         client_params: Dict[str, Any] = {}
 
@@ -51,7 +47,7 @@ class Claude(AnthropicClaude):
             client_params["default_headers"] = self.default_headers
         return client_params
 
-    def get_client(self) -> AnthropicClient:
+    def get_client(self):
         """
         Returns an instance of the Anthropic client.
         """
@@ -59,16 +55,16 @@ class Claude(AnthropicClaude):
             return self.client
 
         _client_params = self._get_client_params()
-        self.client = AnthropicClient(**_client_params)
+        self.client = AnthropicVertex(**_client_params)
         return self.client
 
-    def get_async_client(self) -> AsyncAnthropicClient:
+    def get_async_client(self):
        """
        Returns an instance of the async Anthropic client.
        """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
            return self.async_client
 
        _client_params = self._get_client_params()
-        self.async_client = AsyncAnthropicClient(**_client_params)
+        self.async_client = AsyncAnthropicVertex(**_client_params)
        return self.async_client
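
The aliased imports are replaced with direct ones, the client attributes are re-declared with the Vertex types, and `get_async_client` now rebuilds a closed client rather than returning it. Construction is unchanged; a minimal sketch (model id, project, and region are placeholders):

```python
from agno.models.vertexai.claude import Claude

model = Claude(
    id="claude-sonnet-4-5",       # placeholder model id
    project_id="my-gcp-project",  # placeholder GCP project
    region="us-east5",            # placeholder region
)

client = model.get_client()              # AnthropicVertex
async_client = model.get_async_client()  # AsyncAnthropicVertex; rebuilt once closed
```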
agno/os/app.py CHANGED
@@ -46,7 +46,7 @@ from agno.os.utils import (
     update_cors_middleware,
 )
 from agno.team.team import Team
-from agno.utils.log import logger
+from agno.utils.log import log_debug, log_error, log_warning
 from agno.utils.string import generate_id, generate_id_from_name
 from agno.workflow.workflow import Workflow
 
@@ -454,21 +454,22 @@ class AgentOS:
 
         @fastapi_app.exception_handler(HTTPException)
         async def http_exception_handler(_, exc: HTTPException) -> JSONResponse:
+            log_error(f"HTTP exception: {exc.status_code} {exc.detail}")
             return JSONResponse(
                 status_code=exc.status_code,
                 content={"detail": str(exc.detail)},
             )
 
-        async def general_exception_handler(request: Request, call_next):
-            try:
-                return await call_next(request)
-            except Exception as e:
-                return JSONResponse(
-                    status_code=e.status_code if hasattr(e, "status_code") else 500,  # type: ignore
-                    content={"detail": str(e)},
-                )
+        @fastapi_app.exception_handler(Exception)
+        async def general_exception_handler(_: Request, exc: Exception) -> JSONResponse:
+            import traceback
+
+            log_error(f"Unhandled exception:\n{traceback.format_exc(limit=5)}")
 
-        fastapi_app.middleware("http")(general_exception_handler)
+            return JSONResponse(
+                status_code=getattr(exc, "status_code", 500),
+                content={"detail": str(exc)},
+            )
 
         # Update CORS middleware
         update_cors_middleware(fastapi_app, self.settings.cors_origin_list)  # type: ignore
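
The hand-rolled HTTP middleware is replaced with FastAPI's native `Exception` handler, which also covers errors raised outside the middleware call chain and logs a truncated traceback instead of swallowing it. A standalone app demonstrating the same registration pattern (the app and route are illustrative, not AgentOS itself):

```python
import traceback

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

app = FastAPI()


@app.exception_handler(Exception)
async def general_exception_handler(_: Request, exc: Exception) -> JSONResponse:
    # Same shape as the new AgentOS handler: log a short traceback, then
    # use the exception's status_code if it has one, else 500.
    print(f"Unhandled exception:\n{traceback.format_exc(limit=5)}")
    return JSONResponse(
        status_code=getattr(exc, "status_code", 500),
        content={"detail": str(exc)},
    )


@app.get("/boom")
async def boom():
    raise RuntimeError("demo failure")  # -> {"detail": "demo failure"}, HTTP 500
```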
@@ -500,7 +501,7 @@ class AgentOS:
             # Skip conflicting AgentOS routes, prefer user's existing routes
             for conflict in conflicts:
                 methods_str = ", ".join(conflict["methods"])  # type: ignore
-                logger.debug(
+                log_debug(
                     f"Skipping conflicting AgentOS route: {methods_str} {conflict['path']} - "
                     f"Using existing custom route instead"
                 )
@@ -519,7 +520,7 @@ class AgentOS:
             # Log warnings but still add all routes (AgentOS routes will override)
             for conflict in conflicts:
                 methods_str = ", ".join(conflict["methods"])  # type: ignore
-                logger.warning(
+                log_warning(
                     f"Route conflict detected: {methods_str} {conflict['path']} - "
                     f"AgentOS route will override existing custom route"
                 )
agno/os/routers/evals/evals.py CHANGED
@@ -356,10 +356,10 @@ def attach_routes(
         ):
             default_model = deepcopy(agent.model)
             if eval_run_input.model_id != agent.model.id or eval_run_input.model_provider != agent.model.provider:
-                model = get_model(
-                    model_id=eval_run_input.model_id.lower(),
-                    model_provider=eval_run_input.model_provider.lower(),
-                )
+                model_provider = eval_run_input.model_provider.lower()
+                model_id = eval_run_input.model_id.lower()
+                model_string = f"{model_provider}:{model_id}"
+                model = get_model(model_string)
                 agent.model = model
 
         team = None
@@ -378,10 +378,10 @@ def attach_routes(
         ):
             default_model = deepcopy(team.model)
             if eval_run_input.model_id != team.model.id or eval_run_input.model_provider != team.model.provider:
-                model = get_model(
-                    model_id=eval_run_input.model_id.lower(),
-                    model_provider=eval_run_input.model_provider.lower(),
-                )
+                model_provider = eval_run_input.model_provider.lower()
+                model_id = eval_run_input.model_id.lower()
+                model_string = f"{model_provider}:{model_id}"
+                model = get_model(model_string)
                 team.model = model
 
         agent = None
agno/os/routers/evals/utils.py CHANGED
@@ -33,6 +33,7 @@ async def run_accuracy_eval(
         additional_context=eval_run_input.additional_context,
         num_iterations=eval_run_input.num_iterations or 1,
         name=eval_run_input.name,
+        model=default_model,
     )
 
     result = accuracy_eval.run(print_results=False, print_summary=False)