pydantic-ai-slim 1.0.3__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -71,6 +71,7 @@ class ModelResponsePartsManager:
         *,
         vendor_part_id: VendorId | None,
         content: str,
+        id: str | None = None,
         thinking_tags: tuple[str, str] | None = None,
         ignore_leading_whitespace: bool = False,
     ) -> ModelResponseStreamEvent | None:
@@ -85,6 +86,7 @@ class ModelResponsePartsManager:
                 of text. If None, a new part will be created unless the latest part is already
                 a TextPart.
             content: The text content to append to the appropriate TextPart.
+            id: An optional id for the text part.
             thinking_tags: If provided, will handle content between the thinking tags as thinking parts.
             ignore_leading_whitespace: If True, will ignore leading whitespace in the content.
 
@@ -137,7 +139,7 @@ class ModelResponsePartsManager:
 
             # There is no existing text part that should be updated, so create a new one
             new_part_index = len(self._parts)
-            part = TextPart(content=content)
+            part = TextPart(content=content, id=id)
             if vendor_part_id is not None:
                 self._vendor_id_to_part_index[vendor_part_id] = new_part_index
             self._parts.append(part)
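
For context, a minimal sketch of how the new `id` keyword flows through `handle_text_delta` (not part of the diff; `ModelResponsePartsManager` is a private class and the values are illustrative):

    from pydantic_ai._parts_manager import ModelResponsePartsManager

    manager = ModelResponsePartsManager()
    # The first delta creates a new TextPart and carries the optional id onto it.
    event = manager.handle_text_delta(vendor_part_id='block-0', content='Hello', id='msg_123')
    # Later deltas with the same vendor_part_id append to the same part.
    manager.handle_text_delta(vendor_part_id='block-0', content=', world')
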
@@ -198,16 +200,16 @@ class ModelResponsePartsManager:
                 existing_thinking_part_and_index = existing_part, part_index
 
         if existing_thinking_part_and_index is None:
-            if content is not None:
+            if content is not None or signature is not None:
                 # There is no existing thinking part that should be updated, so create a new one
                 new_part_index = len(self._parts)
-                part = ThinkingPart(content=content, id=id, signature=signature, provider_name=provider_name)
+                part = ThinkingPart(content=content or '', id=id, signature=signature, provider_name=provider_name)
                 if vendor_part_id is not None:  # pragma: no branch
                     self._vendor_id_to_part_index[vendor_part_id] = new_part_index
                 self._parts.append(part)
                 return PartStartEvent(index=new_part_index, part=part)
             else:
-                raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content')
+                raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content or signature')
         else:
             if content is not None or signature is not None:
                 # Update the existing ThinkingPart with the new content and/or signature delta
pydantic_ai/messages.py CHANGED
@@ -870,6 +870,9 @@ class TextPart:
 
     _: KW_ONLY
 
+    id: str | None = None
+    """An optional identifier of the text part."""
+
     part_kind: Literal['text'] = 'text'
     """Part type identifier, this is available on all parts as a discriminator."""
 
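
The new field sits after `_: KW_ONLY`, so it is keyword-only; a small sketch (not part of the diff, id value is a placeholder):

    from pydantic_ai.messages import TextPart

    part = TextPart(content='Hello world', id='msg_123')
    assert part.part_kind == 'text'
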
@@ -718,7 +718,11 @@ def infer_model(model: Model | KnownModelName | str) -> Model: # noqa: C901
         )
         provider = 'google-vertex'
 
-    if provider == 'cohere':
+    if provider == 'gateway':
+        from ..providers.gateway import infer_model as infer_model_from_gateway
+
+        return infer_model_from_gateway(model_name)
+    elif provider == 'cohere':
         from .cohere import CohereModel
 
         return CohereModel(model_name, provider=provider)
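
Assuming the usual `provider:model` string convention used by `infer_model`, a gateway-hosted model can presumably be referenced like this (illustrative sketch; the `gateway:` prefix hands `openai/gpt-5` to `pydantic_ai.providers.gateway.infer_model` below, which splits it into upstream provider and model name):

    from pydantic_ai import Agent

    # Requires PYDANTIC_AI_GATEWAY_API_KEY to be set, per the gateway provider added below.
    agent = Agent('gateway:openai/gpt-5')
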
@@ -641,7 +641,6 @@ class AnthropicStreamedResponse(StreamedResponse):
                     yield self._parts_manager.handle_thinking_delta(
                         vendor_part_id=event.index,
                         id='redacted_thinking',
-                        content='',
                         signature=current_block.data,
                         provider_name=self.provider_name,
                     )
@@ -681,7 +681,6 @@ class BedrockStreamedResponse(StreamedResponse):
                     yield self._parts_manager.handle_thinking_delta(
                         vendor_part_id=index,
                         id='redacted_content',
-                        content='',
                         signature=redacted_content.decode('utf-8'),
                         provider_name=self.provider_name,
                     )
@@ -596,7 +596,6 @@ class GeminiStreamedResponse(StreamedResponse):
                     signature = base64.b64encode(part.thought_signature).decode('utf-8')
                     yield self._parts_manager.handle_thinking_delta(
                         vendor_part_id='thinking',
-                        content='',  # A thought signature may occur without a preceding thinking part, so we add an empty delta so that a new part can be created
                         signature=signature,
                         provider_name=self.provider_name,
                     )
@@ -82,7 +82,7 @@ try:
     from mistralai.models.usermessage import UserMessage as MistralUserMessage
     from mistralai.types.basemodel import Unset as MistralUnset
     from mistralai.utils.eventstreaming import EventStreamAsync as MistralEventStreamAsync
-except ImportError as e:
+except ImportError as e:  # pragma: lax no cover
     raise ImportError(
         'Please install `mistral` to use the Mistral model, '
         'you can use the `mistral` optional group — `pip install "pydantic-ai-slim[mistral]"`'
@@ -190,10 +190,19 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
     This can be useful for debugging and understanding the model's reasoning process.
     One of `concise` or `detailed`.
 
-    Check the [OpenAI Computer use documentation](https://platform.openai.com/docs/guides/tools-computer-use#1-send-a-request-to-the-model)
+    Check the [OpenAI Reasoning documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries)
     for more details.
     """
 
+    openai_send_reasoning_ids: bool
+    """Whether to send reasoning IDs from the message history to the model. Enabled by default.
+
+    This can result in errors like `"Item 'rs_123' of type 'reasoning' was provided without its required following item."`
+    if the message history you're sending does not match exactly what was received from the Responses API in a previous response,
+    for example if you're using a [history processor](../../message-history.md#processing-message-history).
+    In that case, you'll want to disable this.
+    """
+
     openai_truncation: Literal['disabled', 'auto']
     """The truncation strategy to use for the model response.
 
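
A hedged sketch of opting out of the new setting, e.g. when a history processor rewrites messages (not part of the diff; the model name is illustrative):

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

    # Avoids "Item 'rs_123' ... was provided without its required following item." errors
    # when the history sent back doesn't exactly match what the Responses API returned.
    settings = OpenAIResponsesModelSettings(openai_send_reasoning_ids=False)
    agent = Agent(OpenAIResponsesModel('gpt-5'), model_settings=settings)
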
@@ -859,26 +868,38 @@ class OpenAIResponsesModel(Model):
         for item in response.output:
             if isinstance(item, responses.ResponseReasoningItem):
                 signature = item.encrypted_content
-                for summary in item.summary:
-                    # We use the same id for all summaries so that we can merge them on the round trip.
-                    # We only need to store the signature once.
+                if item.summary:
+                    for summary in item.summary:
+                        # We use the same id for all summaries so that we can merge them on the round trip.
+                        items.append(
+                            ThinkingPart(
+                                content=summary.text,
+                                id=item.id,
+                                signature=signature,
+                                provider_name=self.system if signature else None,
+                            )
+                        )
+                        # We only need to store the signature once.
+                        signature = None
+                elif signature:
                     items.append(
                         ThinkingPart(
-                            content=summary.text,
+                            content='',
                             id=item.id,
                             signature=signature,
-                            provider_name=self.system if signature else None,
+                            provider_name=self.system,
                         )
                     )
-                    signature = None
                 # NOTE: We don't currently handle the raw CoT from gpt-oss `reasoning_text`: https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot
                 # If you need this, please file an issue.
             elif isinstance(item, responses.ResponseOutputMessage):
                 for content in item.content:
                     if isinstance(content, responses.ResponseOutputText):  # pragma: no branch
-                        items.append(TextPart(content.text))
+                        items.append(TextPart(content.text, id=item.id))
             elif isinstance(item, responses.ResponseFunctionToolCall):
-                items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
+                items.append(
+                    ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
+                )
 
         finish_reason: FinishReason | None = None
         provider_details: dict[str, Any] | None = None
@@ -956,7 +977,7 @@ class OpenAIResponsesModel(Model):
         else:
             tool_choice = 'auto'
 
-        instructions, openai_messages = await self._map_messages(messages)
+        instructions, openai_messages = await self._map_messages(messages, model_settings)
         reasoning = self._get_reasoning(model_settings)
 
         text: responses.ResponseTextConfigParam | None = None
@@ -980,10 +1001,15 @@
             text = text or {}
             text['verbosity'] = verbosity
 
-        unsupported_model_settings = OpenAIModelProfile.from_profile(self.profile).openai_unsupported_model_settings
+        profile = OpenAIModelProfile.from_profile(self.profile)
+        unsupported_model_settings = profile.openai_unsupported_model_settings
         for setting in unsupported_model_settings:
             model_settings.pop(setting, None)
 
+        include: list[responses.ResponseIncludable] | None = None
+        if profile.openai_supports_encrypted_reasoning_content:
+            include = ['reasoning.encrypted_content']
+
         try:
             extra_headers = model_settings.get('extra_headers', {})
             extra_headers.setdefault('User-Agent', get_user_agent())
@@ -1004,7 +1030,7 @@
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 text=text or NOT_GIVEN,
-                include=['reasoning.encrypted_content'],
+                include=include or NOT_GIVEN,
                 extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
@@ -1067,7 +1093,7 @@
         }
 
     async def _map_messages(  # noqa: C901
-        self, messages: list[ModelMessage]
+        self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
     ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
         """Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
         openai_messages: list[responses.ResponseInputItemParam] = []
@@ -1079,13 +1105,14 @@
                     elif isinstance(part, UserPromptPart):
                         openai_messages.append(await self._map_user_prompt(part))
                     elif isinstance(part, ToolReturnPart):
-                        openai_messages.append(
-                            FunctionCallOutput(
-                                type='function_call_output',
-                                call_id=_guard_tool_call_id(t=part),
-                                output=part.model_response_str(),
-                            )
+                        call_id = _guard_tool_call_id(t=part)
+                        call_id, _ = _split_combined_tool_call_id(call_id)
+                        item = FunctionCallOutput(
+                            type='function_call_output',
+                            call_id=call_id,
+                            output=part.model_response_str(),
                         )
+                        openai_messages.append(item)
                     elif isinstance(part, RetryPromptPart):
                         # TODO(Marcelo): How do we test this conditional branch?
                         if part.tool_name is None:  # pragma: no cover
@@ -1093,40 +1120,81 @@
                                 Message(role='user', content=[{'type': 'input_text', 'text': part.model_response()}])
                             )
                         else:
-                            openai_messages.append(
-                                FunctionCallOutput(
-                                    type='function_call_output',
-                                    call_id=_guard_tool_call_id(t=part),
-                                    output=part.model_response(),
-                                )
+                            call_id = _guard_tool_call_id(t=part)
+                            call_id, _ = _split_combined_tool_call_id(call_id)
+                            item = FunctionCallOutput(
+                                type='function_call_output',
+                                call_id=call_id,
+                                output=part.model_response(),
                             )
+                            openai_messages.append(item)
                     else:
                         assert_never(part)
             elif isinstance(message, ModelResponse):
+                message_item: responses.ResponseOutputMessageParam | None = None
                 reasoning_item: responses.ResponseReasoningItemParam | None = None
                 for item in message.parts:
                     if isinstance(item, TextPart):
-                        openai_messages.append(responses.EasyInputMessageParam(role='assistant', content=item.content))
+                        if item.id and item.id.startswith('msg_'):
+                            if message_item is None or message_item['id'] != item.id:  # pragma: no branch
+                                message_item = responses.ResponseOutputMessageParam(
+                                    role='assistant',
+                                    id=item.id or _utils.generate_tool_call_id(),
+                                    content=[],
+                                    type='message',
+                                    status='completed',
+                                )
+                                openai_messages.append(message_item)
+
+                            message_item['content'] = [
+                                *message_item['content'],
+                                responses.ResponseOutputTextParam(
+                                    text=item.content, type='output_text', annotations=[]
+                                ),
+                            ]
+                        else:
+                            openai_messages.append(
+                                responses.EasyInputMessageParam(role='assistant', content=item.content)
+                            )
                     elif isinstance(item, ToolCallPart):
                         openai_messages.append(self._map_tool_call(item))
                     elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart):
                         # We don't currently track built-in tool calls from OpenAI
                         pass
                     elif isinstance(item, ThinkingPart):
-                        if reasoning_item is not None and item.id == reasoning_item['id']:
-                            reasoning_item['summary'] = [
-                                *reasoning_item['summary'],
-                                Summary(text=item.content, type='summary_text'),
-                            ]
-                            continue
+                        if (
+                            item.id
+                            and item.provider_name == self.system
+                            and OpenAIModelProfile.from_profile(
+                                self.profile
+                            ).openai_supports_encrypted_reasoning_content
+                            and model_settings.get('openai_send_reasoning_ids', True)
+                        ):
+                            if (
+                                reasoning_item is None
+                                or reasoning_item['id'] != item.id
+                                and (item.signature or item.content)
+                            ):  # pragma: no branch
+                                reasoning_item = responses.ResponseReasoningItemParam(
+                                    id=item.id,
+                                    summary=[],
+                                    encrypted_content=item.signature,
+                                    type='reasoning',
+                                )
+                                openai_messages.append(reasoning_item)
 
-                        reasoning_item = responses.ResponseReasoningItemParam(
-                            id=item.id or _utils.generate_tool_call_id(),
-                            summary=[Summary(text=item.content, type='summary_text')],
-                            encrypted_content=item.signature if item.provider_name == self.system else None,
-                            type='reasoning',
-                        )
-                        openai_messages.append(reasoning_item)
+                            if item.content:
+                                reasoning_item['summary'] = [
+                                    *reasoning_item['summary'],
+                                    Summary(text=item.content, type='summary_text'),
+                                ]
+                        else:
+                            start_tag, end_tag = self.profile.thinking_tags
+                            openai_messages.append(
+                                responses.EasyInputMessageParam(
+                                    role='assistant', content='\n'.join([start_tag, item.content, end_tag])
+                                )
+                            )
                     else:
                         assert_never(item)
             else:
@@ -1136,12 +1204,18 @@
 
     @staticmethod
     def _map_tool_call(t: ToolCallPart) -> responses.ResponseFunctionToolCallParam:
-        return responses.ResponseFunctionToolCallParam(
-            arguments=t.args_as_json_str(),
-            call_id=_guard_tool_call_id(t=t),
+        call_id = _guard_tool_call_id(t=t)
+        call_id, id = _split_combined_tool_call_id(call_id)
+
+        param = responses.ResponseFunctionToolCallParam(
             name=t.tool_name,
+            arguments=t.args_as_json_str(),
+            call_id=call_id,
             type='function_call',
         )
+        if id:  # pragma: no branch
+            param['id'] = id
+        return param
 
     def _map_json_schema(self, o: OutputObjectDefinition) -> responses.ResponseFormatTextJSONSchemaConfigParam:
         response_format_param: responses.ResponseFormatTextJSONSchemaConfigParam = {
@@ -1360,7 +1434,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                     vendor_part_id=chunk.item.id,
                     tool_name=chunk.item.name,
                     args=chunk.item.arguments,
-                    tool_call_id=chunk.item.call_id,
+                    tool_call_id=_combine_tool_call_ids(chunk.item.call_id, chunk.item.id),
                 )
             elif isinstance(chunk.item, responses.ResponseReasoningItem):
                 pass
@@ -1376,15 +1450,14 @@
 
             elif isinstance(chunk, responses.ResponseOutputItemDoneEvent):
                 if isinstance(chunk.item, responses.ResponseReasoningItem):
-                    # Add the signature to the part corresponding to the first summary item
-                    signature = chunk.item.encrypted_content
-                    yield self._parts_manager.handle_thinking_delta(
-                        vendor_part_id=f'{chunk.item.id}-0',
-                        id=chunk.item.id,
-                        signature=signature,
-                        provider_name=self.provider_name if signature else None,
-                    )
-                    pass
+                    if signature := chunk.item.encrypted_content:  # pragma: no branch
+                        # Add the signature to the part corresponding to the first summary item
+                        yield self._parts_manager.handle_thinking_delta(
+                            vendor_part_id=f'{chunk.item.id}-0',
+                            id=chunk.item.id,
+                            signature=signature,
+                            provider_name=self.provider_name,
+                        )
 
             elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
                 yield self._parts_manager.handle_thinking_delta(
@@ -1411,7 +1484,9 @@
                 pass  # there's nothing we need to do here
 
             elif isinstance(chunk, responses.ResponseTextDeltaEvent):
-                maybe_event = self._parts_manager.handle_text_delta(vendor_part_id=chunk.item_id, content=chunk.delta)
+                maybe_event = self._parts_manager.handle_text_delta(
+                    vendor_part_id=chunk.item_id, content=chunk.delta, id=chunk.item_id
+                )
                 if maybe_event is not None:  # pragma: no branch
                     yield maybe_event
 
@@ -1501,3 +1576,17 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
             u.input_audio_tokens = response_usage.prompt_tokens_details.audio_tokens or 0
             u.cache_read_tokens = response_usage.prompt_tokens_details.cached_tokens or 0
         return u
+
+
+def _combine_tool_call_ids(call_id: str, id: str | None) -> str:
+    # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
+    # Our `ToolCallPart` has only the `call_id` field, so we combine the two fields into a single string.
+    return f'{call_id}|{id}' if id else call_id
+
+
+def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
+    if '|' in combined_id:
+        call_id, id = combined_id.split('|', 1)
+        return call_id, id
+    else:
+        return combined_id, None  # pragma: no cover
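
Based on the two helpers above, the round trip behaves like this (illustrative ids, not part of the diff):

    combined = _combine_tool_call_ids('call_abc', 'fc_def')
    assert combined == 'call_abc|fc_def'
    assert _split_combined_tool_call_id(combined) == ('call_abc', 'fc_def')
    assert _split_combined_tool_call_id('call_abc') == ('call_abc', None)
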
@@ -41,6 +41,9 @@ class OpenAIModelProfile(ModelProfile):
     openai_chat_supports_web_search: bool = False
     """Whether the model supports web search in Chat Completions API."""
 
+    openai_supports_encrypted_reasoning_content: bool = False
+    """Whether the model supports including encrypted reasoning content in the response."""
+
     def __post_init__(self):  # pragma: no cover
         if not self.openai_supports_sampling_settings:
             warnings.warn(
@@ -84,6 +87,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
         openai_unsupported_model_settings=openai_unsupported_model_settings,
         openai_system_prompt_role=openai_system_prompt_role,
         openai_chat_supports_web_search=supports_web_search,
+        openai_supports_encrypted_reasoning_content=is_reasoning_model,
     )
 
 
@@ -47,6 +47,9 @@ class Provider(ABC, Generic[InterfaceClient]):
         """The model profile for the named model, if available."""
         return None  # pragma: no cover
 
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}(name={self.name}, base_url={self.base_url})'
+
 
 def infer_provider_class(provider: str) -> type[Provider[Any]]:  # noqa: C901
     """Infers the provider class from the provider name."""
@@ -45,12 +45,15 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
     def __init__(self, *, anthropic_client: AsyncAnthropicClient | None = None) -> None: ...
 
     @overload
-    def __init__(self, *, api_key: str | None = None, http_client: httpx.AsyncClient | None = None) -> None: ...
+    def __init__(
+        self, *, api_key: str | None = None, base_url: str | None = None, http_client: httpx.AsyncClient | None = None
+    ) -> None: ...
 
     def __init__(
         self,
         *,
         api_key: str | None = None,
+        base_url: str | None = None,
         anthropic_client: AsyncAnthropicClient | None = None,
         http_client: httpx.AsyncClient | None = None,
     ) -> None:
@@ -59,6 +62,7 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
         Args:
             api_key: The API key to use for authentication, if not provided, the `ANTHROPIC_API_KEY` environment variable
                 will be used if available.
+            base_url: The base URL to use for the Anthropic API.
             anthropic_client: An existing [`AsyncAnthropic`](https://github.com/anthropics/anthropic-sdk-python)
                 client to use. If provided, the `api_key` and `http_client` arguments will be ignored.
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
@@ -68,14 +72,14 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
             assert api_key is None, 'Cannot provide both `anthropic_client` and `api_key`'
             self._client = anthropic_client
         else:
-            api_key = api_key or os.environ.get('ANTHROPIC_API_KEY')
+            api_key = api_key or os.getenv('ANTHROPIC_API_KEY')
            if not api_key:
                raise UserError(
                    'Set the `ANTHROPIC_API_KEY` environment variable or pass it via `AnthropicProvider(api_key=...)`'
                    'to use the Anthropic provider.'
                )
            if http_client is not None:
-                self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
+                self._client = AsyncAnthropic(api_key=api_key, base_url=base_url, http_client=http_client)
             else:
                 http_client = cached_async_http_client(provider='anthropic')
-                self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
+                self._client = AsyncAnthropic(api_key=api_key, base_url=base_url, http_client=http_client)
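
A hedged usage sketch of the new `base_url` parameter, e.g. for a proxy in front of the Anthropic API (not part of the diff; URL and model name are made up):

    from pydantic_ai import Agent
    from pydantic_ai.models.anthropic import AnthropicModel
    from pydantic_ai.providers.anthropic import AnthropicProvider

    provider = AnthropicProvider(api_key='your-api-key', base_url='https://anthropic-proxy.example.com')
    agent = Agent(AnthropicModel('claude-sonnet-4-0', provider=provider))
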
@@ -60,14 +60,14 @@ class CohereProvider(Provider[AsyncClientV2]):
             assert api_key is None, 'Cannot provide both `cohere_client` and `api_key`'
             self._client = cohere_client
         else:
-            api_key = api_key or os.environ.get('CO_API_KEY')
+            api_key = api_key or os.getenv('CO_API_KEY')
             if not api_key:
                 raise UserError(
                     'Set the `CO_API_KEY` environment variable or pass it via `CohereProvider(api_key=...)`'
                     'to use the Cohere provider.'
                 )
 
-            base_url = os.environ.get('CO_BASE_URL')
+            base_url = os.getenv('CO_BASE_URL')
             if http_client is not None:
                 self._client = AsyncClientV2(api_key=api_key, httpx_client=http_client, base_url=base_url)
             else:
@@ -0,0 +1,187 @@
+"""This module implements the Pydantic AI Gateway provider."""
+
+from __future__ import annotations as _annotations
+
+import os
+from typing import TYPE_CHECKING, Any, Literal, overload
+from urllib.parse import urljoin
+
+import httpx
+
+from pydantic_ai.exceptions import UserError
+from pydantic_ai.models import Model, cached_async_http_client, get_user_agent
+
+if TYPE_CHECKING:
+    from google.genai import Client as GoogleClient
+    from groq import AsyncGroq
+    from openai import AsyncOpenAI
+
+    from pydantic_ai.models.anthropic import AsyncAnthropicClient
+    from pydantic_ai.providers import Provider
+
+
+@overload
+def gateway_provider(
+    upstream_provider: Literal['openai', 'openai-chat', 'openai-responses'],
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+    http_client: httpx.AsyncClient | None = None,
+) -> Provider[AsyncOpenAI]: ...
+
+
+@overload
+def gateway_provider(
+    upstream_provider: Literal['groq'],
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+    http_client: httpx.AsyncClient | None = None,
+) -> Provider[AsyncGroq]: ...
+
+
+@overload
+def gateway_provider(
+    upstream_provider: Literal['google-vertex'],
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+) -> Provider[GoogleClient]: ...
+
+
+@overload
+def gateway_provider(
+    upstream_provider: Literal['anthropic'],
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+) -> Provider[AsyncAnthropicClient]: ...
+
+
+def gateway_provider(
+    upstream_provider: Literal['openai', 'openai-chat', 'openai-responses', 'groq', 'google-vertex', 'anthropic'] | str,
+    *,
+    # Every provider
+    api_key: str | None = None,
+    base_url: str | None = None,
+    # OpenAI & Groq
+    http_client: httpx.AsyncClient | None = None,
+) -> Provider[Any]:
+    """Create a new Gateway provider.
+
+    Args:
+        upstream_provider: The upstream provider to use.
+        api_key: The API key to use for authentication. If not provided, the `PYDANTIC_AI_GATEWAY_API_KEY`
+            environment variable will be used if available.
+        base_url: The base URL to use for the Gateway. If not provided, the `PYDANTIC_AI_GATEWAY_BASE_URL`
+            environment variable will be used if available. Otherwise, defaults to `http://localhost:8787/`.
+        http_client: The HTTP client to use for the Gateway.
+    """
+    api_key = api_key or os.getenv('PYDANTIC_AI_GATEWAY_API_KEY')
+    if not api_key:
+        raise UserError(
+            'Set the `PYDANTIC_AI_GATEWAY_API_KEY` environment variable or pass it via `gateway_provider(api_key=...)`'
+            ' to use the Pydantic AI Gateway provider.'
+        )
+
+    base_url = base_url or os.getenv('PYDANTIC_AI_GATEWAY_BASE_URL', 'http://localhost:8787')
+    http_client = http_client or cached_async_http_client(provider=f'gateway-{upstream_provider}')
+    http_client.event_hooks = {'request': [_request_hook]}
+
+    if upstream_provider in ('openai', 'openai-chat'):
+        from .openai import OpenAIProvider
+
+        return OpenAIProvider(api_key=api_key, base_url=urljoin(base_url, 'openai'), http_client=http_client)
+    elif upstream_provider == 'openai-responses':
+        from .openai import OpenAIProvider
+
+        return OpenAIProvider(api_key=api_key, base_url=urljoin(base_url, 'openai'), http_client=http_client)
+    elif upstream_provider == 'groq':
+        from .groq import GroqProvider
+
+        return GroqProvider(api_key=api_key, base_url=urljoin(base_url, 'groq'), http_client=http_client)
+    elif upstream_provider == 'anthropic':
+        from anthropic import AsyncAnthropic
+
+        from .anthropic import AnthropicProvider
+
+        return AnthropicProvider(
+            anthropic_client=AsyncAnthropic(
+                auth_token=api_key,
+                base_url=urljoin(base_url, 'anthropic'),
+                http_client=http_client,
+            )
+        )
+    elif upstream_provider == 'google-vertex':
+        from google.genai import Client as GoogleClient
+
+        from .google import GoogleProvider
+
+        return GoogleProvider(
+            client=GoogleClient(
+                vertexai=True,
+                api_key='unset',
+                http_options={
+                    'base_url': f'{base_url}/google-vertex',
+                    'headers': {'User-Agent': get_user_agent(), 'Authorization': api_key},
+                    # TODO(Marcelo): Until https://github.com/googleapis/python-genai/issues/1357 is solved.
+                    'async_client_args': {
+                        'transport': httpx.AsyncHTTPTransport(),
+                        'event_hooks': {'request': [_request_hook]},
+                    },
+                },
+            )
+        )
+    else:  # pragma: no cover
+        raise UserError(f'Unknown provider: {upstream_provider}')
+
+
+def infer_model(model_name: str) -> Model:
+    """Infer the model class that will be used to make requests to the gateway.
+
+    Args:
+        model_name: The name of the model to infer. Must be in the format "provider/model_name".
+
+    Returns:
+        The model class that will be used to make requests to the gateway.
+    """
+    try:
+        upstream_provider, model_name = model_name.split('/', 1)
+    except ValueError:
+        raise UserError(f'The model name "{model_name}" is not in the format "provider/model_name".')
+
+    if upstream_provider in ('openai', 'openai-chat'):
+        from pydantic_ai.models.openai import OpenAIChatModel
+
+        return OpenAIChatModel(model_name, provider=gateway_provider('openai'))
+    elif upstream_provider == 'openai-responses':
+        from pydantic_ai.models.openai import OpenAIResponsesModel
+
+        return OpenAIResponsesModel(model_name, provider=gateway_provider('openai'))
+    elif upstream_provider == 'groq':
+        from pydantic_ai.models.groq import GroqModel
+
+        return GroqModel(model_name, provider=gateway_provider('groq'))
+    elif upstream_provider == 'anthropic':
+        from pydantic_ai.models.anthropic import AnthropicModel
+
+        return AnthropicModel(model_name, provider=gateway_provider('anthropic'))
+    elif upstream_provider == 'google-vertex':
+        from pydantic_ai.models.google import GoogleModel
+
+        return GoogleModel(model_name, provider=gateway_provider('google-vertex'))
+    raise UserError(f'Unknown upstream provider: {upstream_provider}')
+
+
+async def _request_hook(request: httpx.Request) -> httpx.Request:
+    """Request hook for the gateway provider.
+
+    It adds the `"traceparent"` header to the request.
+    """
+    from opentelemetry.propagate import inject
+
+    headers: dict[str, Any] = {}
+    inject(headers)
+    request.headers.update(headers)
+
+    return request
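
A usage sketch for the new module, under the assumptions stated in its docstrings (not part of the diff; the gateway URL and model name are made up):

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIChatModel
    from pydantic_ai.providers.gateway import gateway_provider

    # api_key falls back to PYDANTIC_AI_GATEWAY_API_KEY; base_url falls back to
    # PYDANTIC_AI_GATEWAY_BASE_URL, else http://localhost:8787.
    provider = gateway_provider('openai', base_url='https://gateway.example.com')
    agent = Agent(OpenAIChatModel('gpt-5', provider=provider))
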
@@ -106,13 +106,13 @@ class GoogleProvider(Provider[Client]):
         else:
             self._client = Client(
                 vertexai=vertexai,
-                project=project or os.environ.get('GOOGLE_CLOUD_PROJECT'),
+                project=project or os.getenv('GOOGLE_CLOUD_PROJECT'),
                 # From https://github.com/pydantic/pydantic-ai/pull/2031/files#r2169682149:
                 # Currently `us-central1` supports the most models by far of any region including `global`, but not
                 # all of them. `us-central1` has all google models but is missing some Anthropic partner models,
                 # which use `us-east5` instead. `global` has fewer models but higher availability.
                 # For more details, check: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions
-                location=location or os.environ.get('GOOGLE_CLOUD_LOCATION') or 'us-central1',
+                location=location or os.getenv('GOOGLE_CLOUD_LOCATION') or 'us-central1',
                 credentials=credentials,
                 http_options=http_options,
             )
@@ -39,7 +39,7 @@ class GoogleGLAProvider(Provider[httpx.AsyncClient]):
             will be used if available.
         http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
         """
-        api_key = api_key or os.environ.get('GEMINI_API_KEY')
+        api_key = api_key or os.getenv('GEMINI_API_KEY')
         if not api_key:
             raise UserError(
                 'Set the `GEMINI_API_KEY` environment variable or pass it via `GoogleGLAProvider(api_key=...)`'
@@ -53,7 +53,7 @@ class GroqProvider(Provider[AsyncGroq]):
 
     @property
     def base_url(self) -> str:
-        return os.environ.get('GROQ_BASE_URL', 'https://api.groq.com')
+        return str(self.client.base_url)
 
     @property
     def client(self) -> AsyncGroq:
@@ -85,12 +85,15 @@ class GroqProvider(Provider[AsyncGroq]):
     def __init__(self, *, groq_client: AsyncGroq | None = None) -> None: ...
 
     @overload
-    def __init__(self, *, api_key: str | None = None, http_client: httpx.AsyncClient | None = None) -> None: ...
+    def __init__(
+        self, *, api_key: str | None = None, base_url: str | None = None, http_client: httpx.AsyncClient | None = None
+    ) -> None: ...
 
     def __init__(
         self,
         *,
         api_key: str | None = None,
+        base_url: str | None = None,
         groq_client: AsyncGroq | None = None,
         http_client: httpx.AsyncClient | None = None,
     ) -> None:
@@ -99,6 +102,8 @@ class GroqProvider(Provider[AsyncGroq]):
         Args:
             api_key: The API key to use for authentication, if not provided, the `GROQ_API_KEY` environment variable
                 will be used if available.
+            base_url: The base url for the Groq requests. If not provided, the `GROQ_BASE_URL` environment variable
+                will be used if available. Otherwise, defaults to Groq's base url.
             groq_client: An existing
                 [`AsyncGroq`](https://github.com/groq/groq-python?tab=readme-ov-file#async-usage)
                 client to use. If provided, `api_key` and `http_client` must be `None`.
@@ -107,9 +112,11 @@ class GroqProvider(Provider[AsyncGroq]):
         if groq_client is not None:
             assert http_client is None, 'Cannot provide both `groq_client` and `http_client`'
             assert api_key is None, 'Cannot provide both `groq_client` and `api_key`'
+            assert base_url is None, 'Cannot provide both `groq_client` and `base_url`'
             self._client = groq_client
         else:
-            api_key = api_key or os.environ.get('GROQ_API_KEY')
+            api_key = api_key or os.getenv('GROQ_API_KEY')
+            base_url = base_url or os.getenv('GROQ_BASE_URL', 'https://api.groq.com')
 
             if not api_key:
                 raise UserError(
@@ -117,7 +124,7 @@ class GroqProvider(Provider[AsyncGroq]):
                     'to use the Groq provider.'
                 )
             elif http_client is not None:
-                self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=http_client)
+                self._client = AsyncGroq(base_url=base_url, api_key=api_key, http_client=http_client)
             else:
                 http_client = cached_async_http_client(provider='groq')
-                self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=http_client)
+                self._client = AsyncGroq(base_url=base_url, api_key=api_key, http_client=http_client)
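
Correspondingly, a hedged sketch of passing `base_url` to `GroqProvider` (not part of the diff; proxy URL and model name are made up):

    from pydantic_ai import Agent
    from pydantic_ai.models.groq import GroqModel
    from pydantic_ai.providers.groq import GroqProvider

    # base_url may also come from GROQ_BASE_URL; otherwise https://api.groq.com.
    provider = GroqProvider(api_key='your-api-key', base_url='https://groq-proxy.example.com')
    agent = Agent(GroqModel('llama-3.3-70b-versatile', provider=provider))
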
@@ -65,14 +65,14 @@ class HerokuProvider(Provider[AsyncOpenAI]):
             assert api_key is None, 'Cannot provide both `openai_client` and `api_key`'
             self._client = openai_client
         else:
-            api_key = api_key or os.environ.get('HEROKU_INFERENCE_KEY')
+            api_key = api_key or os.getenv('HEROKU_INFERENCE_KEY')
             if not api_key:
                 raise UserError(
                     'Set the `HEROKU_INFERENCE_KEY` environment variable or pass it via `HerokuProvider(api_key=...)`'
                     'to use the Heroku provider.'
                 )
 
-            base_url = base_url or os.environ.get('HEROKU_INFERENCE_URL', 'https://us.inference.heroku.com')
+            base_url = base_url or os.getenv('HEROKU_INFERENCE_URL', 'https://us.inference.heroku.com')
             base_url = base_url.rstrip('/') + '/v1'
 
             if http_client is not None:
@@ -95,7 +95,7 @@ class HuggingFaceProvider(Provider[AsyncInferenceClient]):
             defaults to "auto", which will select the first available provider for the model, the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
             If `base_url` is passed, then `provider_name` is not used.
         """
-        api_key = api_key or os.environ.get('HF_TOKEN')
+        api_key = api_key or os.getenv('HF_TOKEN')
 
         if api_key is None:
             raise UserError(
@@ -67,7 +67,7 @@ class MistralProvider(Provider[Mistral]):
             assert base_url is None, 'Cannot provide both `mistral_client` and `base_url`'
             self._client = mistral_client
         else:
-            api_key = api_key or os.environ.get('MISTRAL_API_KEY')
+            api_key = api_key or os.getenv('MISTRAL_API_KEY')
 
             if not api_key:
                 raise UserError(
@@ -1,6 +1,7 @@
 from __future__ import annotations as _annotations
 
 import os
+from typing import overload
 
 import httpx
 
@@ -36,6 +37,18 @@ class OpenAIProvider(Provider[AsyncOpenAI]):
     def model_profile(self, model_name: str) -> ModelProfile | None:
         return openai_model_profile(model_name)
 
+    @overload
+    def __init__(self, *, openai_client: AsyncOpenAI) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        base_url: str | None = None,
+        api_key: str | None = None,
+        openai_client: None = None,
+        http_client: httpx.AsyncClient | None = None,
+    ) -> None: ...
+
     def __init__(
         self,
         base_url: str | None = None,
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.3
+Version: 1.0.5
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.3
+Requires-Dist: pydantic-graph==1.0.5
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.13.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.3; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.5; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
@@ -8,7 +8,7 @@ pydantic_ai/_griffe.py,sha256=BphvTL00FHxsSY56GM-bNyCOdwrpL0T3LbDQITWUK_Q,5280
 pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
 pydantic_ai/_otel_messages.py,sha256=qLu81aBDEAsUTW6efBzWRXNDMICTrUUBpcGbCEyXr4o,1480
 pydantic_ai/_output.py,sha256=phJ9AQYUlhQhAVikL0FpPn_Vm05V_yK3VYmCUUtH778,38296
-pydantic_ai/_parts_manager.py,sha256=qcHBJOM-AxC5iXxt7J6NvPFUIbyPUTE_O18Jiw6uUpA,17903
+pydantic_ai/_parts_manager.py,sha256=1l6RoyhuiDzbZyHP0asYFm63-nuswrIv1H8O8017qAY,18035
 pydantic_ai/_run_context.py,sha256=AFSTtOBbUAnPpM-V5_b5fLMVAFbEBX4oOdYsGR9ayt4,1824
 pydantic_ai/_system_prompt.py,sha256=WdDW_DTGHujcFFaK-J7J6mA4ZDJZ0IOKpyizJA-1Y5Q,1142
 pydantic_ai/_thinking_part.py,sha256=x80-Vkon16GOyq3W6f2qzafTVPC5dCgF7QD3k8ZMmYU,1304
@@ -20,7 +20,7 @@ pydantic_ai/direct.py,sha256=zMsz6poVgEq7t7L_8FWM6hmKdqTzjyQYL5xzQt_59Us,14951
 pydantic_ai/exceptions.py,sha256=zsXZMKf2BJuVsfuHl1fWTkogLU37bd4yq7D6BKHAzVs,4968
 pydantic_ai/format_prompt.py,sha256=37imBG2Fgpn-_RfAFalOX8Xc_XpGH2gY9tnhJDvxfk8,4243
 pydantic_ai/mcp.py,sha256=cmgi3Nq_qe1cTqs-R92WMfZw6bwjSqy2R6NiR7isPcQ,30364
-pydantic_ai/messages.py,sha256=gP5Tzjq3TfOJZuA98kK7oOMTX7gAH6yvW7uDfQYLRIY,55787
+pydantic_ai/messages.py,sha256=bq9Ps-CsYkXdkq4eu1gmIoiLiYsFTwEzB4fXUF_neic,55865
 pydantic_ai/output.py,sha256=wzNgVKJgxyXtSH-uNbRxIaUNLidxlQcwWYT2o1gY2hE,12037
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=FrJbd0nwaRVIxGH_EhV-ITQvrrd-JaDya9EDsE5-Pps,25389
@@ -52,20 +52,20 @@ pydantic_ai/durable_exec/temporal/_toolset.py,sha256=HxmQ5vut7Zd5eyrC27eNNn5_CHA
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/aci.py,sha256=sUllKDNO-LOMurbFgxwRHuzNlBkSa3aVBqXfEm-A_vo,2545
 pydantic_ai/ext/langchain.py,sha256=iLVEZv1kcLkdIHo3us2yfdi0kVqyJ6qTaCt9BoLWm4k,2335
-pydantic_ai/models/__init__.py,sha256=30HXKHE_iGcAMdGfanYY_UEmyyM16bKg-PPf0jZPcbo,36091
-pydantic_ai/models/anthropic.py,sha256=3QAgeshsDQ40E_knZaxy00GmyiYM7Wz0lOVSKYhBxFU,32804
-pydantic_ai/models/bedrock.py,sha256=79Uyi-aeee9EVksOoUd0IXORaTbwUc8AzphS2Nipo28,33437
+pydantic_ai/models/__init__.py,sha256=na9M98DMJ0VpsYhcJ9WI80EI0278XJEJ9jIE_hlW6q4,36256
+pydantic_ai/models/anthropic.py,sha256=-dH4qYSRlRD1XiC1wR89oGHKnFTjxP8zQh0scQDkTCk,32768
+pydantic_ai/models/bedrock.py,sha256=wHo65QNEsfsb1UaUv_TpvJ0WrgFoKoegB6I3eDVnORI,33393
 pydantic_ai/models/cohere.py,sha256=Oly6wpw7Xj0z-690foknLK2z2F9ukDOjxQCGBFFJKkk,13898
 pydantic_ai/models/fallback.py,sha256=XJ74wRxVT4dF0uewHH3is9I-zcLBK8KFIhpK3BB6mRw,5526
 pydantic_ai/models/function.py,sha256=9ZuRDQXChiA_S3a_M9tmmYQwlyuUEFZ20aYrnPqdTz8,14599
 pydantic_ai/models/gemini.py,sha256=DYEaOnwGmo9FUGVkRRrydGuQwYhnO-Cq5grTurLWgb4,39376
-pydantic_ai/models/google.py,sha256=BztYqxeGO2k6uJP4Rv4QSW8oLCnmus9t7V3w0NyCqEc,34921
+pydantic_ai/models/google.py,sha256=i8dMi3wyeSTT8CFgbDd9gxKUhahgkBEXNXjLqs2utwI,34758
 pydantic_ai/models/groq.py,sha256=G17TeyQeIAYG7hANe1LkZhj41xScB8VYum3AK7Q5KRA,26769
 pydantic_ai/models/huggingface.py,sha256=f1tZObCJkcbiUCwNoPyuiaRaGYuj0GBFmbA8yFd-tHY,21176
 pydantic_ai/models/instrumented.py,sha256=DCnyG7HXgV-W2EWac8oZb2A8PL8yarXeU7Rt97l4w_s,21421
 pydantic_ai/models/mcp_sampling.py,sha256=qnLCO3CB5bNQ86SpWRA-CSSOVcCCLPwjHtcNFvW9wHs,3461
-pydantic_ai/models/mistral.py,sha256=FXZ7QnVJSi9j8V1z0Bz56N_WhQOjiDVYGy5tIV76KU4,33630
-pydantic_ai/models/openai.py,sha256=GCsKTlAn1n0yqWkWqcsihhcCNPdKLbl1NlICbmmcSZg,69533
+pydantic_ai/models/mistral.py,sha256=ru8EHwFS0xZBN6s1tlssUdjxjQyjB9L_8kFH7qq5U_g,33654
+pydantic_ai/models/openai.py,sha256=AsAqJIjTeN2d-WX0wPLM9phbEZHoR8PvWkfplRWUp7U,74021
 pydantic_ai/models/test.py,sha256=1kBwi7pSUt9_K1U-hokOilplxJWPQ3KRKH_s8bYmt_s,19969
 pydantic_ai/models/wrapper.py,sha256=9MeHW7mXPsEK03IKL0rtjeX6QgXyZROOOzLh72GiX2k,2148
 pydantic_ai/profiles/__init__.py,sha256=V6uGAVJuIaYRuZOQjkdIyFfDKD5py18RC98njnHOFug,3293
@@ -81,29 +81,30 @@ pydantic_ai/profiles/harmony.py,sha256=_81tOGOYGTH3Za67jjtdINvASTTM5_CTyc1Ej2KHJ
 pydantic_ai/profiles/meta.py,sha256=JdZcpdRWx8PY1pU9Z2i_TYtA0Cpbg23xyFrV7eXnooY,309
 pydantic_ai/profiles/mistral.py,sha256=ll01PmcK3szwlTfbaJLQmfd0TADN8lqjov9HpPJzCMQ,217
 pydantic_ai/profiles/moonshotai.py,sha256=e1RJnbEvazE6aJAqfmYLYGNtwNwg52XQDRDkcLrv3fU,272
-pydantic_ai/profiles/openai.py,sha256=aOmuA4g1bz-JHirJoEpah7tmZPYrdISMPkvh87jSb4I,9365
+pydantic_ai/profiles/openai.py,sha256=YmIm_2YoIcUrQ22XVcBy9mdXMAF-5glUdYme2jY_5nc,9592
 pydantic_ai/profiles/qwen.py,sha256=9SnTpMKndxNQMFyumyaOczJa5JGWbYQdpVKKW4OzKjk,749
-pydantic_ai/providers/__init__.py,sha256=QlFpPM_kGnF_YAzwB9Dgmxx4Emp33x0bh831H_xKDXE,4478
-pydantic_ai/providers/anthropic.py,sha256=bDFNAE4WB66Dn7YDnI3dv6yBbMmM9Kzt2kalM4Fq8WQ,3158
+pydantic_ai/providers/__init__.py,sha256=ksqF_HFRWiFsoXwz4lIQSA2VRvfQTCliRIDVepvBkiE,4598
+pydantic_ai/providers/anthropic.py,sha256=4Cy5S2lnU_hThp8wkDPwWev1Mzmiztw4otFsfrpL8F8,3336
 pydantic_ai/providers/azure.py,sha256=msYyeQoHATxCJkiF1N05lPSJivh-SWKK1463WF6xTK4,5823
 pydantic_ai/providers/bedrock.py,sha256=t4ADWFYJKxW-Al5-lOSlb4p-5YiTEOU_O7owcVSP3CU,6522
 pydantic_ai/providers/cerebras.py,sha256=2zgsNxup_7OEPOXnbJHMYnVRDnB9UYTQnOO4wv7xnYA,3436
-pydantic_ai/providers/cohere.py,sha256=-F0prLuI2aDtHNZakd1GlyvgFLio-fo5n6fRbyPMvro,2858
+pydantic_ai/providers/cohere.py,sha256=5__UI6PdNz1eqGda2ylBwUuenpNG9MJN-fDSbFO0euQ,2848
 pydantic_ai/providers/deepseek.py,sha256=JSc7dQbB-l7_Phf61ZLb4_c1oym9fHea_h2Yq88uoL8,3032
 pydantic_ai/providers/fireworks.py,sha256=-jMRxbt353nENdpxuDpC4zJZ9wlJBcWa4wdcUk4cXKo,3594
+pydantic_ai/providers/gateway.py,sha256=2O7nepvn8s3IbMSAdRZ8V_ag0VmjDKjvc9gGCb99AEE,6675
 pydantic_ai/providers/github.py,sha256=Mp6-piXuRe5R0Iu4p0N06aIZgX7rJe5KRzCjt9E4OK4,4378
-pydantic_ai/providers/google.py,sha256=iLXcKUl5r7wdLuZtT1IM3obGZi7ecLM_PDyWdQKDncI,6038
-pydantic_ai/providers/google_gla.py,sha256=dLkDxps5gEtxsQiDbs1e88lXLYeX4i2qnJtDiFFJ0Ng,1965
+pydantic_ai/providers/google.py,sha256=rYLZa1LUkOXKu39_VIF7u3buYC78IWiYwCEiS0vRJH8,6028
+pydantic_ai/providers/google_gla.py,sha256=X_gxItZg8IGGK0RRsa8lXA_vQtPHEiIDLpfg2u9s5oY,1960
 pydantic_ai/providers/google_vertex.py,sha256=tAR3L1DZPDvGOJsKyGkIRPeXL7wjly4CvqTWMK1ozVQ,9752
 pydantic_ai/providers/grok.py,sha256=s9Y_iYkYCBc7UbP2ppGOUdAP_04xrkmPBHq3q3Qr9eE,3109
-pydantic_ai/providers/groq.py,sha256=3XuYqvugToJhTf7kQCdtdaTpFsiqAu_pwnIQnHm04uo,4913
-pydantic_ai/providers/heroku.py,sha256=wA36vh0ldpdaj33FPtfo4roY_MhaCqErjLyGtcbC6Xs,2958
-pydantic_ai/providers/huggingface.py,sha256=MLAv-Z99Kii5Faolq97_0Ir1LUKH9CwRmJFaI5RvwW4,4914
+pydantic_ai/providers/groq.py,sha256=axqy1Hko3NISSzwHACGZddwLZDGup89o13eRwBboU8I,5321
+pydantic_ai/providers/heroku.py,sha256=ADohwsLz25jpqxVjtv9hFhMrJui7AGak5j3hbTIHZPc,2948
+pydantic_ai/providers/huggingface.py,sha256=_Bvi2qdbOB8E9mhiJX3fVoUDZWPCTduCFASZT9JhnI8,4909
 pydantic_ai/providers/litellm.py,sha256=3hTCjHWRG_1c4S9JSNm0BDBDi4q6BVVZ3OLSXhTndNM,5079
-pydantic_ai/providers/mistral.py,sha256=ZxfOQNB2RADtHeGLQrhxHwq6cXpBi3LMgIUa_9wXoug,3088
+pydantic_ai/providers/mistral.py,sha256=pHcWHb2Wf9ZcqQl_Lp84ZvepO0Hmyb1CiqCTbur9S-s,3083
 pydantic_ai/providers/moonshotai.py,sha256=LwasmxCZCPkq1pb1uDtZTEb_nE55bAtX3QXgLmuNlHE,3260
 pydantic_ai/providers/ollama.py,sha256=_bxons0p8g0RSPNV8iq3AScVS1ym27QTW4zhDqSakgY,4633
-pydantic_ai/providers/openai.py,sha256=xCpR2c7QnYQukiJJKiFTSaGSewPFht7ekTasJDjSimA,3071
+pydantic_ai/providers/openai.py,sha256=SKRsYRUW_zu24iKAM7KJ-6j8GQDIjjxll4AWY1uB3Vs,3410
 pydantic_ai/providers/openrouter.py,sha256=PXGgHPtlQQHKFaSnmiswWZ3dTvmT9PAg-NvfRYGjrPw,4154
 pydantic_ai/providers/together.py,sha256=Dln_NgCul1XVOQtNaYvqWrNjOWj9XzA8n4NwNMKkbLk,3450
 pydantic_ai/providers/vercel.py,sha256=Q7pPvzaoh7Uiqq7CD8TxaWnXnXRKYgWJRwQXSYm0ZKQ,4257
@@ -119,8 +120,8 @@ pydantic_ai/toolsets/prefixed.py,sha256=0KwcDkW8OM36ZUsOLVP5h-Nj2tPq78L3_E2c-1Fb
 pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
 pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
 pydantic_ai/toolsets/wrapper.py,sha256=KRzF1p8dncHbva8CE6Ud-IC5E_aygIHlwH5atXK55k4,1673
-pydantic_ai_slim-1.0.3.dist-info/METADATA,sha256=NHnWG1OWAS7GaA51ZBBz0bV0OI3qM3Qc4LLF5BYIleA,4627
-pydantic_ai_slim-1.0.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydantic_ai_slim-1.0.3.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
-pydantic_ai_slim-1.0.3.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
-pydantic_ai_slim-1.0.3.dist-info/RECORD,,
+pydantic_ai_slim-1.0.5.dist-info/METADATA,sha256=Q3hdkuEKOT2YbEgzFhTBzofRVHl_wZZMFc5nXCgsTEY,4627
+pydantic_ai_slim-1.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-1.0.5.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-1.0.5.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-1.0.5.dist-info/RECORD,,