pydantic-ai-slim 0.0.22__tar.gz → 0.0.23__tar.gz

This diff shows the content of publicly available package versions as released to one of the supported registries, and is provided for informational purposes only.


Files changed (31)
  1. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/_agent_graph.py +12 -8
  3. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/agent.py +2 -2
  4. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/__init__.py +39 -37
  5. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/anthropic.py +69 -66
  6. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/cohere.py +56 -68
  7. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/function.py +58 -60
  8. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/gemini.py +132 -99
  9. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/groq.py +79 -72
  10. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/mistral.py +72 -71
  11. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/openai.py +90 -70
  12. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/test.py +81 -93
  13. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/vertexai.py +38 -44
  14. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pyproject.toml +3 -3
  15. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/.gitignore +0 -0
  16. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/README.md +0 -0
  17. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/__init__.py +0 -0
  18. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/_griffe.py +0 -0
  19. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/_parts_manager.py +0 -0
  20. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/_pydantic.py +0 -0
  21. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/_result.py +0 -0
  22. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/_system_prompt.py +0 -0
  23. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/_utils.py +0 -0
  24. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/exceptions.py +0 -0
  25. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/format_as_xml.py +0 -0
  26. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/messages.py +0 -0
  27. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/py.typed +0 -0
  28. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/result.py +0 -0
  29. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/settings.py +0 -0
  30. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/tools.py +0 -0
  31. {pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/usage.py +0 -0
{pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.22
+Version: 0.0.23
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -28,7 +28,7 @@ Requires-Dist: eval-type-backport>=0.2.0
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: logfire-api>=1.2.0
-Requires-Dist: pydantic-graph==0.0.22
+Requires-Dist: pydantic-graph==0.0.23
 Requires-Dist: pydantic>=2.10
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.40.0; extra == 'anthropic'
@@ -41,7 +41,7 @@ Requires-Dist: logfire>=2.3; extra == 'logfire'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
 Provides-Extra: openai
-Requires-Dist: openai>=1.59.0; extra == 'openai'
+Requires-Dist: openai>=1.61.0; extra == 'openai'
 Provides-Extra: vertexai
 Requires-Dist: google-auth>=2.36.0; extra == 'vertexai'
 Requires-Dist: requests>=2.32.3; extra == 'vertexai'
{pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/_agent_graph.py
@@ -204,9 +204,9 @@ class StreamUserPromptNode(BaseUserPromptNode[DepsT, NodeRunEndT]):
         return StreamModelRequestNode[DepsT, NodeRunEndT](request=await self._get_first_message(ctx))


-async def _prepare_model(
+async def _prepare_request_parameters(
     ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
-) -> models.AgentModel:
+) -> models.ModelRequestParameters:
     """Build tools and create an agent model."""
     function_tool_defs: list[ToolDefinition] = []

@@ -220,7 +220,7 @@ async def _prepare_model(
     await asyncio.gather(*map(add_tool, ctx.deps.function_tools.values()))

     result_schema = ctx.deps.result_schema
-    return await run_context.model.agent_model(
+    return models.ModelRequestParameters(
         function_tools=function_tool_defs,
         allow_text_result=_allow_text_result(result_schema),
         result_tools=result_schema.tool_defs() if result_schema is not None else [],
@@ -245,13 +245,15 @@ class ModelRequestNode(BaseNode[GraphAgentState, GraphAgentDeps[DepsT, Any], Nod
         # Increment run_step
         ctx.state.run_step += 1

-        with _logfire.span('preparing model and tools {run_step=}', run_step=ctx.state.run_step):
-            agent_model = await _prepare_model(ctx)
+        with _logfire.span('preparing model request params {run_step=}', run_step=ctx.state.run_step):
+            model_request_parameters = await _prepare_request_parameters(ctx)

         # Actually make the model request
         model_settings = merge_model_settings(ctx.deps.model_settings, None)
         with _logfire.span('model request') as span:
-            model_response, request_usage = await agent_model.request(ctx.state.message_history, model_settings)
+            model_response, request_usage = await ctx.deps.model.request(
+                ctx.state.message_history, model_settings, model_request_parameters
+            )
             span.set_attribute('response', model_response)
             span.set_attribute('usage', request_usage)

@@ -405,12 +407,14 @@ class StreamModelRequestNode(BaseNode[GraphAgentState, GraphAgentDeps[DepsT, Any
         ctx.state.run_step += 1

         with _logfire.span('preparing model and tools {run_step=}', run_step=ctx.state.run_step):
-            agent_model = await _prepare_model(ctx)
+            model_request_parameters = await _prepare_request_parameters(ctx)

         # Actually make the model request
         model_settings = merge_model_settings(ctx.deps.model_settings, None)
         with _logfire.span('model request {run_step=}', run_step=ctx.state.run_step) as model_req_span:
-            async with agent_model.request_stream(ctx.state.message_history, model_settings) as streamed_response:
+            async with ctx.deps.model.request_stream(
+                ctx.state.message_history, model_settings, model_request_parameters
+            ) as streamed_response:
                 ctx.state.usage.requests += 1
                 model_req_span.set_attribute('response_type', streamed_response.__class__.__name__)
                 # We want to end the "model request" span here, but we can't exit the context manager
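Taken together, the two hunks above replace the per-step `agent_model` object with plain data: the graph now builds a `ModelRequestParameters` value and hands it, along with the message history and settings, straight to the model. A minimal sketch of the new calling convention, assuming a `models.Model` instance is already in hand (the message contents are illustrative, not from the diff):

    from pydantic_ai import models
    from pydantic_ai.messages import ModelRequest, UserPromptPart

    async def one_request(model: models.Model) -> None:
        # the configuration that AgentModel used to capture is now a plain value...
        params = models.ModelRequestParameters(
            function_tools=[],       # ToolDefinition list for the agent's tools
            allow_text_result=True,  # a plain-text final result is permitted
            result_tools=[],         # ToolDefinition list for the result tool(s)
        )
        history = [ModelRequest(parts=[UserPromptPart(content='hello')])]
        # ...passed explicitly on every request (model_settings is None here)
        response, request_usage = await model.request(history, None, params)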
{pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/agent.py
@@ -309,7 +309,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
             '{agent_name} run {prompt=}',
             prompt=user_prompt,
             agent=self,
-            model_name=model_used.name() if model_used else 'no-model',
+            model_name=model_used.model_name if model_used else 'no-model',
             agent_name=self.name or 'agent',
         ) as run_span:
             # Build the deps object for the graph
@@ -554,7 +554,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
             '{agent_name} run stream {prompt=}',
             prompt=user_prompt,
             agent=self,
-            model_name=model_used.name(),
+            model_name=model_used.model_name if model_used else 'no-model',
             agent_name=self.name or 'agent',
         ) as run_span:
             # Build the deps object for the graph
{pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/__init__.py
@@ -112,6 +112,8 @@ KnownModelName = Literal[
     'o1-mini-2024-09-12',
     'o1-preview',
     'o1-preview-2024-09-12',
+    'o3-mini',
+    'o3-mini-2025-01-31',
     'openai:chatgpt-4o-latest',
     'openai:gpt-3.5-turbo',
     'openai:gpt-3.5-turbo-0125',
@@ -149,6 +151,8 @@ KnownModelName = Literal[
     'openai:o1-mini-2024-09-12',
     'openai:o1-preview',
     'openai:o1-preview-2024-09-12',
+    'openai:o3-mini',
+    'openai:o3-mini-2025-01-31',
     'test',
 ]
 """Known model names that can be used with the `model` parameter of [`Agent`][pydantic_ai.Agent].
@@ -157,49 +161,37 @@ KnownModelName = Literal[
 """


-class Model(ABC):
-    """Abstract class for a model."""
+@dataclass
+class ModelRequestParameters:
+    """Configuration for an agent's request to a model, specifically related to tools and result handling."""

-    @abstractmethod
-    async def agent_model(
-        self,
-        *,
-        function_tools: list[ToolDefinition],
-        allow_text_result: bool,
-        result_tools: list[ToolDefinition],
-    ) -> AgentModel:
-        """Create an agent model, this is called for each step of an agent run.
-
-        This is async in case slow/async config checks need to be performed that can't be done in `__init__`.
-
-        Args:
-            function_tools: The tools available to the agent.
-            allow_text_result: Whether a plain text final response/result is permitted.
-            result_tools: Tool definitions for the final result tool(s), if any.
-
-        Returns:
-            An agent model.
-        """
-        raise NotImplementedError()
+    function_tools: list[ToolDefinition]
+    allow_text_result: bool
+    result_tools: list[ToolDefinition]

-    @abstractmethod
-    def name(self) -> str:
-        raise NotImplementedError()

+class Model(ABC):
+    """Abstract class for a model."""

-class AgentModel(ABC):
-    """Model configured for each step of an Agent run."""
+    _model_name: str
+    _system: str | None

     @abstractmethod
     async def request(
-        self, messages: list[ModelMessage], model_settings: ModelSettings | None
+        self,
+        messages: list[ModelMessage],
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
     ) -> tuple[ModelResponse, Usage]:
         """Make a request to the model."""
         raise NotImplementedError()

     @asynccontextmanager
     async def request_stream(
-        self, messages: list[ModelMessage], model_settings: ModelSettings | None
+        self,
+        messages: list[ModelMessage],
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
     ) -> AsyncIterator[StreamedResponse]:
         """Make a request to the model and return a streaming response."""
         # This method is not required, but you need to implement it if you want to support streamed responses
@@ -208,6 +200,16 @@ class AgentModel(ABC):
         # noinspection PyUnreachableCode
         yield  # pragma: no cover

+    @property
+    def model_name(self) -> str:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider, ex: openai."""
+        return self._system
+

 @dataclass
 class StreamedResponse(ABC):
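For anyone maintaining a custom model, the two hunks above are the breaking change of this release: the `Model.agent_model(...) -> AgentModel` indirection is gone, and a subclass now implements `request` (and optionally `request_stream`) directly, receiving the tool configuration as a `ModelRequestParameters` argument and exposing its name through the new `_model_name`/`model_name` property pair. A rough sketch of a custom model against the new interface; the `EchoModel` class is hypothetical, not part of this diff:

    from pydantic_ai import models, usage
    from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart
    from pydantic_ai.settings import ModelSettings

    class EchoModel(models.Model):
        """Hypothetical model that reports what it was asked to do."""

        _model_name = 'echo'
        _system = None  # surfaced through the new `system` property

        async def request(
            self,
            messages: list[ModelMessage],
            model_settings: ModelSettings | None,
            model_request_parameters: models.ModelRequestParameters,
        ) -> tuple[ModelResponse, usage.Usage]:
            # real models with cost/latency should call
            # models.check_allow_model_requests() here (see the docstring change below)
            n_tools = len(model_request_parameters.function_tools)
            text = f'{len(messages)} message(s), {n_tools} tool(s)'
            return ModelResponse([TextPart(content=text)], model_name=self._model_name), usage.Usage()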
@@ -270,7 +272,7 @@ def check_allow_model_requests() -> None:
     """Check if model requests are allowed.

     If you're defining your own models that have costs or latency associated with their use, you should call this in
-    [`Model.agent_model`][pydantic_ai.models.Model.agent_model].
+    [`Model.request`][pydantic_ai.models.Model.request] and [`Model.request_stream`][pydantic_ai.models.Model.request_stream].

     Raises:
         RuntimeError: If model requests are not allowed.
@@ -311,33 +313,33 @@ def infer_model(model: Model | KnownModelName) -> Model:
         from .openai import OpenAIModel

         return OpenAIModel(model[7:])
-    elif model.startswith(('gpt', 'o1')):
+    elif model.startswith(('gpt', 'o1', 'o3')):
         from .openai import OpenAIModel

         return OpenAIModel(model)
     elif model.startswith('google-gla'):
         from .gemini import GeminiModel

-        return GeminiModel(model[11:])  # pyright: ignore[reportArgumentType]
+        return GeminiModel(model[11:])
     # backwards compatibility with old model names (ex, gemini-1.5-flash -> google-gla:gemini-1.5-flash)
     elif model.startswith('gemini'):
         from .gemini import GeminiModel

         # noinspection PyTypeChecker
-        return GeminiModel(model)  # pyright: ignore[reportArgumentType]
+        return GeminiModel(model)
     elif model.startswith('groq:'):
         from .groq import GroqModel

-        return GroqModel(model[5:])  # pyright: ignore[reportArgumentType]
+        return GroqModel(model[5:])
     elif model.startswith('google-vertex'):
         from .vertexai import VertexAIModel

-        return VertexAIModel(model[14:])  # pyright: ignore[reportArgumentType]
+        return VertexAIModel(model[14:])
     # backwards compatibility with old model names (ex, vertexai:gemini-1.5-flash -> google-vertex:gemini-1.5-flash)
     elif model.startswith('vertexai:'):
         from .vertexai import VertexAIModel

-        return VertexAIModel(model[9:])  # pyright: ignore[reportArgumentType]
+        return VertexAIModel(model[9:])
     elif model.startswith('mistral:'):
         from .mistral import MistralModel

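The dropped `pyright: ignore` comments suggest the Gemini/Groq/VertexAI constructors now accept plain strings. The slice offsets themselves are just the prefix lengths, colon included, which can be sanity-checked directly:

    # each slice strips '<provider>:' from the front of the model string
    assert len('openai:') == 7
    assert len('google-gla:') == 11
    assert len('groq:') == 5
    assert len('google-vertex:') == 14
    assert len('vertexai:') == 9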
{pydantic_ai_slim-0.0.22 → pydantic_ai_slim-0.0.23}/pydantic_ai/models/anthropic.py
@@ -28,8 +28,8 @@ from ..messages import (
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from . import (
-    AgentModel,
     Model,
+    ModelRequestParameters,
     StreamedResponse,
     cached_async_http_client,
     check_allow_model_requests,
@@ -68,14 +68,14 @@ LatestAnthropicModelNames = Literal[
     'claude-3-5-sonnet-latest',
     'claude-3-opus-latest',
 ]
-"""Latest named Anthropic models."""
+"""Latest Anthropic models."""

 AnthropicModelName = Union[str, LatestAnthropicModelNames]
 """Possible Anthropic model names.

 Since Anthropic supports a variety of date-stamped models, we explicitly list the latest models but
 allow any name in the type hints.
-Since [the Anthropic docs](https://docs.anthropic.com/en/docs/about-claude/models) for a full list.
+See [the Anthropic docs](https://docs.anthropic.com/en/docs/about-claude/models) for a full list.
 """


@@ -101,9 +101,11 @@ class AnthropicModel(Model):
     We anticipate adding support for streaming responses in a near-term future release.
     """

-    model_name: AnthropicModelName
     client: AsyncAnthropic = field(repr=False)

+    _model_name: AnthropicModelName = field(repr=False)
+    _system: str | None = field(default='anthropic', repr=False)
+
     def __init__(
         self,
         model_name: AnthropicModelName,
@@ -124,7 +126,7 @@ class AnthropicModel(Model):
             client to use, if provided, `api_key` and `http_client` must be `None`.
         http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
         """
-        self.model_name = model_name
+        self._model_name = model_name
         if anthropic_client is not None:
             assert http_client is None, 'Cannot provide both `anthropic_client` and `http_client`'
             assert api_key is None, 'Cannot provide both `anthropic_client` and `api_key`'
@@ -134,81 +136,67 @@ class AnthropicModel(Model):
         else:
             self.client = AsyncAnthropic(api_key=api_key, http_client=cached_async_http_client())

-    async def agent_model(
+    async def request(
         self,
-        *,
-        function_tools: list[ToolDefinition],
-        allow_text_result: bool,
-        result_tools: list[ToolDefinition],
-    ) -> AgentModel:
+        messages: list[ModelMessage],
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+    ) -> tuple[ModelResponse, usage.Usage]:
         check_allow_model_requests()
-        tools = [self._map_tool_definition(r) for r in function_tools]
-        if result_tools:
-            tools += [self._map_tool_definition(r) for r in result_tools]
-        return AnthropicAgentModel(
-            self.client,
-            self.model_name,
-            allow_text_result,
-            tools,
+        response = await self._messages_create(
+            messages, False, cast(AnthropicModelSettings, model_settings or {}), model_request_parameters
         )
-
-    def name(self) -> str:
-        return f'anthropic:{self.model_name}'
-
-    @staticmethod
-    def _map_tool_definition(f: ToolDefinition) -> ToolParam:
-        return {
-            'name': f.name,
-            'description': f.description,
-            'input_schema': f.parameters_json_schema,
-        }
-
-
-@dataclass
-class AnthropicAgentModel(AgentModel):
-    """Implementation of `AgentModel` for Anthropic models."""
-
-    client: AsyncAnthropic
-    model_name: AnthropicModelName
-    allow_text_result: bool
-    tools: list[ToolParam]
-
-    async def request(
-        self, messages: list[ModelMessage], model_settings: ModelSettings | None
-    ) -> tuple[ModelResponse, usage.Usage]:
-        response = await self._messages_create(messages, False, cast(AnthropicModelSettings, model_settings or {}))
         return self._process_response(response), _map_usage(response)

     @asynccontextmanager
     async def request_stream(
-        self, messages: list[ModelMessage], model_settings: ModelSettings | None
+        self,
+        messages: list[ModelMessage],
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
     ) -> AsyncIterator[StreamedResponse]:
-        response = await self._messages_create(messages, True, cast(AnthropicModelSettings, model_settings or {}))
+        check_allow_model_requests()
+        response = await self._messages_create(
+            messages, True, cast(AnthropicModelSettings, model_settings or {}), model_request_parameters
+        )
         async with response:
             yield await self._process_streamed_response(response)

     @overload
     async def _messages_create(
-        self, messages: list[ModelMessage], stream: Literal[True], model_settings: AnthropicModelSettings
+        self,
+        messages: list[ModelMessage],
+        stream: Literal[True],
+        model_settings: AnthropicModelSettings,
+        model_request_parameters: ModelRequestParameters,
     ) -> AsyncStream[RawMessageStreamEvent]:
         pass

     @overload
     async def _messages_create(
-        self, messages: list[ModelMessage], stream: Literal[False], model_settings: AnthropicModelSettings
+        self,
+        messages: list[ModelMessage],
+        stream: Literal[False],
+        model_settings: AnthropicModelSettings,
+        model_request_parameters: ModelRequestParameters,
     ) -> AnthropicMessage:
         pass

     async def _messages_create(
-        self, messages: list[ModelMessage], stream: bool, model_settings: AnthropicModelSettings
+        self,
+        messages: list[ModelMessage],
+        stream: bool,
+        model_settings: AnthropicModelSettings,
+        model_request_parameters: ModelRequestParameters,
     ) -> AnthropicMessage | AsyncStream[RawMessageStreamEvent]:
         # standalone function to make it easier to override
+        tools = self._get_tools(model_request_parameters)
         tool_choice: ToolChoiceParam | None

-        if not self.tools:
+        if not tools:
             tool_choice = None
         else:
-            if not self.allow_text_result:
+            if not model_request_parameters.allow_text_result:
                 tool_choice = {'type': 'any'}
             else:
                 tool_choice = {'type': 'auto'}
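With `AnthropicAgentModel` folded into `AnthropicModel` above, the model object is constructed once and drives every request itself. A short usage sketch under the new API (the prompt is illustrative):

    from pydantic_ai import Agent
    from pydantic_ai.models.anthropic import AnthropicModel

    # one model object for the whole run; the agent passes
    # ModelRequestParameters into AnthropicModel.request at each step
    model = AnthropicModel('claude-3-5-sonnet-latest')
    agent = Agent(model)
    result = agent.run_sync('What is the capital of France?')
    print(result.data)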
@@ -222,8 +210,8 @@ class AnthropicAgentModel(AgentModel):
             max_tokens=model_settings.get('max_tokens', 1024),
             system=system_prompt or NOT_GIVEN,
             messages=anthropic_messages,
-            model=self.model_name,
-            tools=self.tools or NOT_GIVEN,
+            model=self._model_name,
+            tools=tools or NOT_GIVEN,
             tool_choice=tool_choice or NOT_GIVEN,
             stream=stream,
             temperature=model_settings.get('temperature', NOT_GIVEN),
@@ -248,7 +236,7 @@ class AnthropicAgentModel(AgentModel):
                 )
             )

-        return ModelResponse(items, model_name=self.model_name)
+        return ModelResponse(items, model_name=self._model_name)

     async def _process_streamed_response(self, response: AsyncStream[RawMessageStreamEvent]) -> StreamedResponse:
         peekable_response = _utils.PeekableAsyncStream(response)
@@ -258,10 +246,17 @@ class AnthropicAgentModel(AgentModel):

         # Since Anthropic doesn't provide a timestamp in the message, we'll use the current time
         timestamp = datetime.now(tz=timezone.utc)
-        return AnthropicStreamedResponse(_model_name=self.model_name, _response=peekable_response, _timestamp=timestamp)
+        return AnthropicStreamedResponse(
+            _model_name=self._model_name, _response=peekable_response, _timestamp=timestamp
+        )

-    @staticmethod
-    def _map_message(messages: list[ModelMessage]) -> tuple[str, list[MessageParam]]:
+    def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ToolParam]:
+        tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
+        if model_request_parameters.result_tools:
+            tools += [self._map_tool_definition(r) for r in model_request_parameters.result_tools]
+        return tools
+
+    def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[MessageParam]]:
         """Just maps a `pydantic_ai.Message` to a `anthropic.types.MessageParam`."""
         system_prompt: str = ''
         anthropic_messages: list[MessageParam] = []
@@ -310,20 +305,28 @@ class AnthropicAgentModel(AgentModel):
                         content.append(TextBlockParam(text=item.content, type='text'))
                     else:
                         assert isinstance(item, ToolCallPart)
-                        content.append(_map_tool_call(item))
+                        content.append(self._map_tool_call(item))
                 anthropic_messages.append(MessageParam(role='assistant', content=content))
             else:
                 assert_never(m)
         return system_prompt, anthropic_messages

+    @staticmethod
+    def _map_tool_call(t: ToolCallPart) -> ToolUseBlockParam:
+        return ToolUseBlockParam(
+            id=_guard_tool_call_id(t=t, model_source='Anthropic'),
+            type='tool_use',
+            name=t.tool_name,
+            input=t.args_as_dict(),
+        )

-def _map_tool_call(t: ToolCallPart) -> ToolUseBlockParam:
-    return ToolUseBlockParam(
-        id=_guard_tool_call_id(t=t, model_source='Anthropic'),
-        type='tool_use',
-        name=t.tool_name,
-        input=t.args_as_dict(),
-    )
+    @staticmethod
+    def _map_tool_definition(f: ToolDefinition) -> ToolParam:
+        return {
+            'name': f.name,
+            'description': f.description,
+            'input_schema': f.parameters_json_schema,
+        }


 def _map_usage(message: AnthropicMessage | RawMessageStreamEvent) -> usage.Usage: