pydantic-ai-slim 1.5.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff compares the contents of two package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between the released versions exactly as they appear in the registry.

pydantic_ai/_output.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations as _annotations
 
 import inspect
 import json
+import re
 from abc import ABC, abstractmethod
 from collections.abc import Awaitable, Callable, Sequence
 from dataclasses import dataclass, field
@@ -70,6 +71,7 @@ Usage `OutputValidatorFunc[AgentDepsT, T]`.
 
 DEFAULT_OUTPUT_TOOL_NAME = 'final_result'
 DEFAULT_OUTPUT_TOOL_DESCRIPTION = 'The final response which ends this conversation'
+OUTPUT_TOOL_NAME_SANITIZER = re.compile(r'[^a-zA-Z0-9-_]')
 
 
 async def execute_traced_output_function(
@@ -554,6 +556,20 @@ class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
     def mode(self) -> OutputMode:
         return 'prompted'
 
+    @classmethod
+    def build_instructions(cls, template: str, object_def: OutputObjectDefinition) -> str:
+        """Build instructions from a template and an object definition."""
+        schema = object_def.json_schema.copy()
+        if object_def.name:
+            schema['title'] = object_def.name
+        if object_def.description:
+            schema['description'] = object_def.description
+
+        if '{schema}' not in template:
+            template = '\n\n'.join([template, '{schema}'])
+
+        return template.format(schema=json.dumps(schema))
+
     def raise_if_unsupported(self, profile: ModelProfile) -> None:
         """Raise an error if the mode is not supported by this model."""
         super().raise_if_unsupported(profile)
@@ -561,18 +577,8 @@ class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
     def instructions(self, default_template: str) -> str:
         """Get instructions to tell model to output JSON matching the schema."""
         template = self.template or default_template
-
-        if '{schema}' not in template:
-            template = '\n\n'.join([template, '{schema}'])
-
         object_def = self.object_def
-        schema = object_def.json_schema.copy()
-        if object_def.name:
-            schema['title'] = object_def.name
-        if object_def.description:
-            schema['description'] = object_def.description
-
-        return template.format(schema=json.dumps(schema))
+        return self.build_instructions(template, object_def)
 
 
 @dataclass(init=False)
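The instructions() refactor above is a pure extraction: the schema-merging logic now lives in the reusable build_instructions() classmethod, so callers without a schema instance (notably the new Outlines model below) can use it too. A standalone sketch of the merge behavior, with an illustrative template and schema rather than real pydantic-ai objects:

import json

# Hypothetical template and schema, for illustration only.
template = 'Answer with JSON matching this schema:'
schema = {'title': 'CityLocation', 'type': 'object', 'properties': {'city': {'type': 'string'}}}

# Mirrors build_instructions: append a '{schema}' placeholder if the
# template lacks one, then substitute the serialized JSON schema.
if '{schema}' not in template:
    template = '\n\n'.join([template, '{schema}'])
print(template.format(schema=json.dumps(schema)))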
@@ -997,7 +1003,9 @@ class OutputToolset(AbstractToolset[AgentDepsT]):
             if name is None:
                 name = default_name
                 if multiple:
-                    name += f'_{object_def.name}'
+                    # strip unsupported characters like "[" and "]" from generic class names
+                    safe_name = OUTPUT_TOOL_NAME_SANITIZER.sub('', object_def.name or '')
+                    name += f'_{safe_name}'
 
             i = 1
             original_name = name
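The sanitizer matters for generic output types, whose class names contain characters such as "[" and "]" that provider tool-name rules reject. A quick illustration of the regex; the input name is a made-up example:

import re

OUTPUT_TOOL_NAME_SANITIZER = re.compile(r'[^a-zA-Z0-9-_]')
# A generic class name like this previously leaked brackets into the tool name:
print(OUTPUT_TOOL_NAME_SANITIZER.sub('', 'Result[list[str]]'))  # -> 'Resultliststr'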
pydantic_ai/models/openai.py CHANGED
@@ -1431,6 +1431,8 @@ class OpenAIResponsesModel(Model):
                     call_id=call_id,
                     type='function_call',
                 )
+                if profile.openai_responses_requires_function_call_status_none:
+                    param['status'] = None  # type: ignore[reportGeneralTypeIssues]
                 if id and send_item_ids:  # pragma: no branch
                     param['id'] = id
                 openai_messages.append(param)
pydantic_ai/models/outlines.py ADDED
@@ -0,0 +1,563 @@
+# There are linting and coverage escapes for MLXLM and VLLMOffline as the CI would not contain the right
+# environment to be able to run the associated tests
+
+# pyright: reportUnnecessaryTypeIgnoreComment = false
+
+from __future__ import annotations
+
+import io
+from collections.abc import AsyncIterable, AsyncIterator, Sequence
+from contextlib import asynccontextmanager
+from dataclasses import dataclass
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any, Literal, cast
+
+from typing_extensions import assert_never
+
+from .. import UnexpectedModelBehavior, _utils
+from .._output import PromptedOutputSchema
+from .._run_context import RunContext
+from .._thinking_part import split_content_into_text_and_thinking
+from ..exceptions import UserError
+from ..messages import (
+    BinaryContent,
+    BuiltinToolCallPart,
+    BuiltinToolReturnPart,
+    FilePart,
+    ImageUrl,
+    ModelMessage,
+    ModelRequest,
+    ModelResponse,
+    ModelResponsePart,
+    ModelResponseStreamEvent,
+    RetryPromptPart,
+    SystemPromptPart,
+    TextPart,
+    ThinkingPart,
+    ToolCallPart,
+    ToolReturnPart,
+    UserPromptPart,
+)
+from ..profiles import ModelProfile, ModelProfileSpec
+from ..providers import Provider, infer_provider
+from ..settings import ModelSettings
+from . import (
+    DownloadedItem,
+    Model,
+    ModelRequestParameters,
+    StreamedResponse,
+    download_item,
+)
+
+try:
+    from outlines.inputs import Chat, Image
+    from outlines.models.base import AsyncModel as OutlinesAsyncBaseModel, Model as OutlinesBaseModel
+    from outlines.models.llamacpp import LlamaCpp, from_llamacpp
+    from outlines.models.mlxlm import MLXLM, from_mlxlm
+    from outlines.models.sglang import AsyncSGLang, SGLang, from_sglang
+    from outlines.models.transformers import (
+        Transformers,
+        from_transformers,
+    )
+    from outlines.models.vllm_offline import (
+        VLLMOffline,
+        from_vllm_offline,  # pyright: ignore[reportUnknownVariableType]
+    )
+    from outlines.types.dsl import JsonSchema
+    from PIL import Image as PILImage
+except ImportError as _import_error:
+    raise ImportError(
+        'Please install `outlines` to use the Outlines model, '
+        'you can use the `outlines` optional group — `pip install "pydantic-ai-slim[outlines]"`'
+    ) from _import_error
+
+if TYPE_CHECKING:
+    import llama_cpp
+    import mlx.nn as nn
+    import transformers
+
+
+@dataclass(init=False)
+class OutlinesModel(Model):
+    """A model that relies on the Outlines library to run non API-based models."""
+
+    def __init__(
+        self,
+        model: OutlinesBaseModel | OutlinesAsyncBaseModel,
+        *,
+        provider: Literal['outlines'] | Provider[OutlinesBaseModel] = 'outlines',
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Initialize an Outlines model.
+
+        Args:
+            model: The Outlines model used for the model.
+            provider: The provider to use for OutlinesModel. Can be either the string 'outlines' or an
+                instance of `Provider[OutlinesBaseModel]`. If not provided, the other parameters will be used.
+            profile: The model profile to use. Defaults to a profile picked by the provider.
+            settings: Default model settings for this model instance.
+        """
+        self.model: OutlinesBaseModel | OutlinesAsyncBaseModel = model
+        self._model_name: str = 'outlines-model'
+
+        if isinstance(provider, str):
+            provider = infer_provider(provider)
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
+
+    @classmethod
+    def from_transformers(
+        cls,
+        hf_model: transformers.modeling_utils.PreTrainedModel,
+        hf_tokenizer_or_processor: transformers.tokenization_utils.PreTrainedTokenizer
+        | transformers.processing_utils.ProcessorMixin,
+        *,
+        provider: Literal['outlines'] | Provider[OutlinesBaseModel] = 'outlines',
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Create an Outlines model from a Hugging Face model and tokenizer.
+
+        Args:
+            hf_model: The Hugging Face PreTrainedModel or any model that is compatible with the
+                `transformers` API.
+            hf_tokenizer_or_processor: Either a HuggingFace `PreTrainedTokenizer` or any tokenizer that is compatible
+                with the `transformers` API, or a HuggingFace processor inheriting from `ProcessorMixin`. If a
+                tokenizer is provided, a regular model will be used, while if you provide a processor, it will be a
+                multimodal model.
+            provider: The provider to use for OutlinesModel. Can be either the string 'outlines' or an
+                instance of `Provider[OutlinesBaseModel]`. If not provided, the other parameters will be used.
+            profile: The model profile to use. Defaults to a profile picked by the provider.
+            settings: Default model settings for this model instance.
+        """
+        outlines_model: OutlinesBaseModel = from_transformers(hf_model, hf_tokenizer_or_processor)
+        return cls(outlines_model, provider=provider, profile=profile, settings=settings)
+
+    @classmethod
+    def from_llamacpp(
+        cls,
+        llama_model: llama_cpp.Llama,
+        *,
+        provider: Literal['outlines'] | Provider[OutlinesBaseModel] = 'outlines',
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Create an Outlines model from a LlamaCpp model.
+
+        Args:
+            llama_model: The llama_cpp.Llama model to use.
+            provider: The provider to use for OutlinesModel. Can be either the string 'outlines' or an
+                instance of `Provider[OutlinesBaseModel]`. If not provided, the other parameters will be used.
+            profile: The model profile to use. Defaults to a profile picked by the provider.
+            settings: Default model settings for this model instance.
+        """
+        outlines_model: OutlinesBaseModel = from_llamacpp(llama_model)
+        return cls(outlines_model, provider=provider, profile=profile, settings=settings)
+
+    @classmethod
+    def from_mlxlm(  # pragma: no cover
+        cls,
+        mlx_model: nn.Module,
+        mlx_tokenizer: transformers.tokenization_utils.PreTrainedTokenizer,
+        *,
+        provider: Literal['outlines'] | Provider[OutlinesBaseModel] = 'outlines',
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Create an Outlines model from a MLXLM model.
+
+        Args:
+            mlx_model: The nn.Module model to use.
+            mlx_tokenizer: The PreTrainedTokenizer to use.
+            provider: The provider to use for OutlinesModel. Can be either the string 'outlines' or an
+                instance of `Provider[OutlinesBaseModel]`. If not provided, the other parameters will be used.
+            profile: The model profile to use. Defaults to a profile picked by the provider.
+            settings: Default model settings for this model instance.
+        """
+        outlines_model: OutlinesBaseModel = from_mlxlm(mlx_model, mlx_tokenizer)
+        return cls(outlines_model, provider=provider, profile=profile, settings=settings)
+
+    @classmethod
+    def from_sglang(
+        cls,
+        base_url: str,
+        api_key: str | None = None,
+        model_name: str | None = None,
+        *,
+        provider: Literal['outlines'] | Provider[OutlinesBaseModel] = 'outlines',
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Create an Outlines model to send requests to an SGLang server.
+
+        Args:
+            base_url: The url of the SGLang server.
+            api_key: The API key to use for authenticating requests to the SGLang server.
+            model_name: The name of the model to use.
+            provider: The provider to use for OutlinesModel. Can be either the string 'outlines' or an
+                instance of `Provider[OutlinesBaseModel]`. If not provided, the other parameters will be used.
+            profile: The model profile to use. Defaults to a profile picked by the provider.
+            settings: Default model settings for this model instance.
+        """
+        try:
+            from openai import AsyncOpenAI
+        except ImportError as _import_error:
+            raise ImportError(
+                'Please install `openai` to use the Outlines SGLang model, '
+                'you can use the `openai` optional group — `pip install "pydantic-ai-slim[openai]"`'
+            ) from _import_error
+
+        openai_client = AsyncOpenAI(base_url=base_url, api_key=api_key)
+        outlines_model: OutlinesBaseModel | OutlinesAsyncBaseModel = from_sglang(openai_client, model_name)
+        return cls(outlines_model, provider=provider, profile=profile, settings=settings)
+
+    @classmethod
+    def from_vllm_offline(  # pragma: no cover
+        cls,
+        vllm_model: Any,
+        *,
+        provider: Literal['outlines'] | Provider[OutlinesBaseModel] = 'outlines',
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Create an Outlines model from a vLLM offline inference model.
+
+        Args:
+            vllm_model: The vllm.LLM local model to use.
+            provider: The provider to use for OutlinesModel. Can be either the string 'outlines' or an
+                instance of `Provider[OutlinesBaseModel]`. If not provided, the other parameters will be used.
+            profile: The model profile to use. Defaults to a profile picked by the provider.
+            settings: Default model settings for this model instance.
+        """
+        outlines_model: OutlinesBaseModel | OutlinesAsyncBaseModel = from_vllm_offline(vllm_model)
+        return cls(outlines_model, provider=provider, profile=profile, settings=settings)
+
+    @property
+    def model_name(self) -> str:
+        return self._model_name
+
+    @property
+    def system(self) -> str:
+        return 'outlines'
+
+    async def request(
+        self,
+        messages: list[ModelMessage],
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+    ) -> ModelResponse:
+        """Make a request to the model."""
+        prompt, output_type, inference_kwargs = await self._build_generation_arguments(
+            messages, model_settings, model_request_parameters
+        )
+        # Async is available for SgLang
+        response: str
+        if isinstance(self.model, OutlinesAsyncBaseModel):
+            response = await self.model(prompt, output_type, None, **inference_kwargs)
+        else:
+            response = self.model(prompt, output_type, None, **inference_kwargs)
+        return self._process_response(response)
+
+    @asynccontextmanager
+    async def request_stream(
+        self,
+        messages: list[ModelMessage],
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
+    ) -> AsyncIterator[StreamedResponse]:
+        prompt, output_type, inference_kwargs = await self._build_generation_arguments(
+            messages, model_settings, model_request_parameters
+        )
+        # Async is available for SgLang
+        if isinstance(self.model, OutlinesAsyncBaseModel):
+            response = self.model.stream(prompt, output_type, None, **inference_kwargs)
+            yield await self._process_streamed_response(response, model_request_parameters)
+        else:
+            response = self.model.stream(prompt, output_type, None, **inference_kwargs)
+
+            async def async_response():
+                for chunk in response:
+                    yield chunk
+
+            yield await self._process_streamed_response(async_response(), model_request_parameters)
+
+    async def _build_generation_arguments(
+        self,
+        messages: list[ModelMessage],
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+    ) -> tuple[Chat, JsonSchema | None, dict[str, Any]]:
+        """Build the generation arguments for the model."""
+        if (
+            model_request_parameters.function_tools
+            or model_request_parameters.builtin_tools
+            or model_request_parameters.output_tools
+        ):
+            raise UserError('Outlines does not support function tools and builtin tools yet.')
+
+        if model_request_parameters.output_object:
+            instructions = PromptedOutputSchema.build_instructions(
+                self.profile.prompted_output_template, model_request_parameters.output_object
+            )
+            output_type = JsonSchema(model_request_parameters.output_object.json_schema)
+        else:
+            instructions = None
+            output_type = None
+
+        prompt = await self._format_prompt(messages, instructions)
+        inference_kwargs = self.format_inference_kwargs(model_settings)
+
+        return prompt, output_type, inference_kwargs
+
+    def format_inference_kwargs(self, model_settings: ModelSettings | None) -> dict[str, Any]:
+        """Format the model settings for the inference kwargs."""
+        settings_dict: dict[str, Any] = dict(model_settings) if model_settings else {}
+
+        if isinstance(self.model, Transformers):
+            settings_dict = self._format_transformers_inference_kwargs(settings_dict)
+        elif isinstance(self.model, LlamaCpp):
+            settings_dict = self._format_llama_cpp_inference_kwargs(settings_dict)
+        elif isinstance(self.model, MLXLM):  # pragma: no cover
+            settings_dict = self._format_mlxlm_inference_kwargs(settings_dict)
+        elif isinstance(self.model, SGLang | AsyncSGLang):
+            settings_dict = self._format_sglang_inference_kwargs(settings_dict)
+        elif isinstance(self.model, VLLMOffline):  # pragma: no cover
+            settings_dict = self._format_vllm_offline_inference_kwargs(settings_dict)
+
+        extra_body = settings_dict.pop('extra_body', {})
+        settings_dict.update(extra_body)
+
+        return settings_dict
+
+    def _format_transformers_inference_kwargs(self, model_settings: dict[str, Any]) -> dict[str, Any]:
+        """Select the model settings supported by the Transformers model."""
+        supported_args = [
+            'max_tokens',
+            'temperature',
+            'top_p',
+            'logit_bias',
+            'extra_body',
+        ]
+        filtered_settings = {k: model_settings[k] for k in supported_args if k in model_settings}
+
+        return filtered_settings
+
+    def _format_llama_cpp_inference_kwargs(self, model_settings: dict[str, Any]) -> dict[str, Any]:
+        """Select the model settings supported by the LlamaCpp model."""
+        supported_args = [
+            'max_tokens',
+            'temperature',
+            'top_p',
+            'seed',
+            'presence_penalty',
+            'frequency_penalty',
+            'logit_bias',
+            'extra_body',
+        ]
+        filtered_settings = {k: model_settings[k] for k in supported_args if k in model_settings}
+
+        return filtered_settings
+
+    def _format_mlxlm_inference_kwargs(  # pragma: no cover
+        self, model_settings: dict[str, Any]
+    ) -> dict[str, Any]:
+        """Select the model settings supported by the MLXLM model."""
+        supported_args = [
+            'extra_body',
+        ]
+        filtered_settings = {k: model_settings[k] for k in supported_args if k in model_settings}
+
+        return filtered_settings
+
+    def _format_sglang_inference_kwargs(self, model_settings: dict[str, Any]) -> dict[str, Any]:
+        """Select the model settings supported by the SGLang model."""
+        supported_args = [
+            'max_tokens',
+            'temperature',
+            'top_p',
+            'presence_penalty',
+            'frequency_penalty',
+            'extra_body',
+        ]
+        filtered_settings = {k: model_settings[k] for k in supported_args if k in model_settings}
+
+        return filtered_settings
+
+    def _format_vllm_offline_inference_kwargs(  # pragma: no cover
+        self, model_settings: dict[str, Any]
+    ) -> dict[str, Any]:
+        """Select the model settings supported by the vLLMOffline model."""
+        from vllm.sampling_params import SamplingParams  # pyright: ignore
+
+        supported_args = [
+            'max_tokens',
+            'temperature',
+            'top_p',
+            'seed',
+            'presence_penalty',
+            'frequency_penalty',
+            'logit_bias',
+            'extra_body',
+        ]
+        # The arguments that are part of the fields of `ModelSettings` must be put in a `SamplingParams` object and
+        # provided through the `sampling_params` argument to vLLM
+        sampling_params = model_settings.get('extra_body', {}).pop('sampling_params', SamplingParams())
+
+        for key in supported_args:
+            setattr(sampling_params, key, model_settings.get(key, None))
+
+        filtered_settings = {
+            'sampling_params': sampling_params,
+            **model_settings.get('extra_body', {}),
+        }
+
+        return filtered_settings
+
+    async def _format_prompt(  # noqa: C901
+        self, messages: list[ModelMessage], output_format_instructions: str | None
+    ) -> Chat:
+        """Turn the model messages into an Outlines Chat instance."""
+        chat = Chat()
+
+        if instructions := self._get_instructions(messages):
+            chat.add_system_message(instructions)
+
+        if output_format_instructions:
+            chat.add_system_message(output_format_instructions)
+
+        for message in messages:
+            if isinstance(message, ModelRequest):
+                for part in message.parts:
+                    if isinstance(part, SystemPromptPart):
+                        chat.add_system_message(part.content)
+                    elif isinstance(part, UserPromptPart):
+                        if isinstance(part.content, str):
+                            chat.add_user_message(part.content)
+                        elif isinstance(part.content, Sequence):
+                            outlines_input: Sequence[str | Image] = []
+                            for item in part.content:
+                                if isinstance(item, str):
+                                    outlines_input.append(item)
+                                elif isinstance(item, ImageUrl):
+                                    image_content: DownloadedItem[bytes] = await download_item(
+                                        item, data_format='bytes', type_format='mime'
+                                    )
+                                    image = self._create_PIL_image(image_content['data'], image_content['data_type'])
+                                    outlines_input.append(Image(image))
+                                elif isinstance(item, BinaryContent) and item.is_image:
+                                    image = self._create_PIL_image(item.data, item.media_type)
+                                    outlines_input.append(Image(image))
+                                else:
+                                    raise UserError(
+                                        'Each element of the content sequence must be a string, an `ImageUrl`'
+                                        + ' or a `BinaryImage`.'
+                                    )
+                            chat.add_user_message(outlines_input)
+                        else:
+                            assert_never(part.content)
+                    elif isinstance(part, RetryPromptPart):
+                        chat.add_user_message(part.model_response())
+                    elif isinstance(part, ToolReturnPart):
+                        raise UserError('Tool calls are not supported for Outlines models yet.')
+                    else:
+                        assert_never(part)
+            elif isinstance(message, ModelResponse):
+                text_parts: list[str] = []
+                image_parts: list[Image] = []
+                for part in message.parts:
+                    if isinstance(part, TextPart):
+                        text_parts.append(part.content)
+                    elif isinstance(part, ThinkingPart):
+                        # NOTE: We don't send ThinkingPart to the providers yet.
+                        pass
+                    elif isinstance(part, ToolCallPart | BuiltinToolCallPart | BuiltinToolReturnPart):
+                        raise UserError('Tool calls are not supported for Outlines models yet.')
+                    elif isinstance(part, FilePart):
+                        if isinstance(part.content, BinaryContent) and part.content.is_image:
+                            image = self._create_PIL_image(part.content.data, part.content.media_type)
+                            image_parts.append(Image(image))
+                        else:
+                            raise UserError(
+                                'File parts other than `BinaryImage` are not supported for Outlines models yet.'
+                            )
+                    else:
+                        assert_never(part)
+                if len(text_parts) == 1 and len(image_parts) == 0:
+                    chat.add_assistant_message(text_parts[0])
+                else:
+                    chat.add_assistant_message([*text_parts, *image_parts])
+            else:
+                assert_never(message)
+        return chat
+
+    def _create_PIL_image(self, data: bytes, data_type: str) -> PILImage.Image:
+        """Create a PIL Image from the data and data type."""
+        image = PILImage.open(io.BytesIO(data))
+        image.format = data_type.split('/')[-1]
+        return image
+
+    def _process_response(self, response: str) -> ModelResponse:
+        """Turn the Outlines text response into a Pydantic AI model response instance."""
+        return ModelResponse(
+            parts=cast(
+                list[ModelResponsePart], split_content_into_text_and_thinking(response, self.profile.thinking_tags)
+            ),
+        )
+
+    async def _process_streamed_response(
+        self, response: AsyncIterable[str], model_request_parameters: ModelRequestParameters
+    ) -> StreamedResponse:
+        """Turn the Outlines text response into a Pydantic AI streamed response instance."""
+        peekable_response = _utils.PeekableAsyncStream(response)
+        first_chunk = await peekable_response.peek()
+        if isinstance(first_chunk, _utils.Unset):  # pragma: no cover
+            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')
+
+        timestamp = datetime.now(tz=timezone.utc)
+        return OutlinesStreamedResponse(
+            model_request_parameters=model_request_parameters,
+            _model_name=self._model_name,
+            _model_profile=self.profile,
+            _response=peekable_response,
+            _timestamp=timestamp,
+            _provider_name='outlines',
+        )
+
+
+@dataclass
+class OutlinesStreamedResponse(StreamedResponse):
+    """Implementation of `StreamedResponse` for Outlines models."""
+
+    _model_name: str
+    _model_profile: ModelProfile
+    _response: AsyncIterable[str]
+    _timestamp: datetime
+    _provider_name: str
+
+    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
+        async for event in self._response:
+            event = self._parts_manager.handle_text_delta(
+                vendor_part_id='content',
+                content=event,
+                thinking_tags=self._model_profile.thinking_tags,
+                ignore_leading_whitespace=self._model_profile.ignore_streamed_leading_whitespace,
+            )
+            if event is not None:  # pragma: no branch
+                yield event
+
+    @property
+    def model_name(self) -> str:
+        """Get the model name of the response."""
+        return self._model_name
+
+    @property
+    def provider_name(self) -> str:
+        """Get the provider name."""
+        return self._provider_name
+
+    @property
+    def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
+        return self._timestamp
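For orientation, a minimal usage sketch of the new model; the GGUF path and the prompt are illustrative assumptions, not part of this diff:

import llama_cpp
from pydantic_ai import Agent
from pydantic_ai.models.outlines import OutlinesModel

# Hypothetical local weights; any llama_cpp.Llama instance works here.
llama = llama_cpp.Llama(model_path='./model.gguf')
model = OutlinesModel.from_llamacpp(llama)
agent = Agent(model)
result = agent.run_sync('What is the capital of France?')
print(result.output)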
pydantic_ai/models/test.py CHANGED
@@ -44,11 +44,14 @@ class _WrappedTextOutput:
     value: str | None
 
 
-@dataclass
+@dataclass(init=False)
 class _WrappedToolOutput:
     """A wrapper class to tag an output that came from the custom_output_args field."""
 
-    value: Any | None
+    value: dict[str, Any] | None
+
+    def __init__(self, value: Any | None):
+        self.value = pydantic_core.to_jsonable_python(value)
 
 
 @dataclass(init=False)
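The _WrappedToolOutput change normalizes whatever was passed as custom output args into plain JSON data at construction time via pydantic_core.to_jsonable_python. For reference, a small demonstration of that conversion; the Point model is an illustrative stand-in:

import pydantic_core
from pydantic import BaseModel

class Point(BaseModel):
    x: int
    y: int

# BaseModel instances, dataclasses, datetimes, etc. become plain JSON-compatible data.
print(pydantic_core.to_jsonable_python(Point(x=1, y=2)))  # {'x': 1, 'y': 2}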
@@ -364,7 +367,7 @@ class _JsonSchemaTestData:
         self.defs = schema.get('$defs', {})
         self.seed = seed
 
-    def generate(self) -> Any:
+    def generate(self) -> dict[str, Any]:
         """Generate data for the JSON schema."""
         return self._gen_any(self.schema)
 
pydantic_ai/profiles/openai.py CHANGED
@@ -44,6 +44,13 @@ class OpenAIModelProfile(ModelProfile):
     openai_supports_encrypted_reasoning_content: bool = False
     """Whether the model supports including encrypted reasoning content in the response."""
 
+    openai_responses_requires_function_call_status_none: bool = False
+    """Whether the Responses API requires the `status` field on function tool calls to be `None`.
+
+    This is required by vLLM Responses API versions before https://github.com/vllm-project/vllm/pull/26706.
+    See https://github.com/pydantic/pydantic-ai/issues/3245 for more details.
+    """
+
     def __post_init__(self):  # pragma: no cover
         if not self.openai_supports_sampling_settings:
             warnings.warn(
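Together with the OpenAIResponsesModel change above, this profile field lets users opt into the vLLM workaround per model. A hedged sketch; the model name and base URL are illustrative:

from pydantic_ai.models.openai import OpenAIResponsesModel
from pydantic_ai.profiles.openai import OpenAIModelProfile
from pydantic_ai.providers.openai import OpenAIProvider

model = OpenAIResponsesModel(
    'qwen3-8b',  # hypothetical model served by an older vLLM
    provider=OpenAIProvider(base_url='http://localhost:8000/v1', api_key='EMPTY'),
    profile=OpenAIModelProfile(openai_responses_requires_function_call_status_none=True),
)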
pydantic_ai/providers/__init__.py CHANGED
@@ -145,6 +145,10 @@ def infer_provider_class(provider: str) -> type[Provider[Any]]: # noqa: C901
         from .ovhcloud import OVHcloudProvider
 
         return OVHcloudProvider
+    elif provider == 'outlines':
+        from .outlines import OutlinesProvider
+
+        return OutlinesProvider
     else:  # pragma: no cover
         raise ValueError(f'Unknown provider: {provider}')
 
pydantic_ai/providers/outlines.py ADDED
@@ -0,0 +1,40 @@
+from __future__ import annotations as _annotations
+
+from typing import Any
+
+from pydantic_ai.profiles import ModelProfile
+from pydantic_ai.providers import Provider
+
+
+class OutlinesProvider(Provider[Any]):
+    """Provider for Outlines API."""
+
+    @property
+    def name(self) -> str:
+        """The provider name."""
+        return 'outlines'
+
+    @property
+    def base_url(self) -> str:
+        """The base URL for the provider API."""
+        raise NotImplementedError(
+            'The Outlines provider does not have a set base URL as it functions '
+            + 'with a set of different underlying models.'
+        )
+
+    @property
+    def client(self) -> Any:
+        """The client for the provider."""
+        raise NotImplementedError(
+            'The Outlines provider does not have a set client as it functions '
+            + 'with a set of different underlying models.'
+        )
+
+    def model_profile(self, model_name: str) -> ModelProfile | None:
+        """The model profile for the named model, if available."""
+        return ModelProfile(
+            supports_tools=False,
+            supports_json_schema_output=True,
+            supports_json_object_output=True,
+            default_structured_output_mode='native',
+        )
pydantic_ai/settings.py CHANGED
@@ -24,6 +24,7 @@ class ModelSettings(TypedDict, total=False):
     * Mistral
     * Bedrock
     * MCP Sampling
+    * Outlines (all providers)
     """
 
     temperature: float
@@ -43,6 +44,7 @@
     * Cohere
     * Mistral
     * Bedrock
+    * Outlines (Transformers, LlamaCpp, SgLang, VLLMOffline)
     """
 
     top_p: float
@@ -61,6 +63,7 @@
     * Cohere
     * Mistral
     * Bedrock
+    * Outlines (Transformers, LlamaCpp, SgLang, VLLMOffline)
     """
 
     timeout: float | Timeout
@@ -95,6 +98,7 @@
     * Cohere
     * Mistral
     * Gemini
+    * Outlines (LlamaCpp, VLLMOffline)
     """
 
     presence_penalty: float
@@ -107,6 +111,7 @@
     * Cohere
     * Gemini
     * Mistral
+    * Outlines (LlamaCpp, SgLang, VLLMOffline)
     """
 
     frequency_penalty: float
@@ -119,6 +124,7 @@
     * Cohere
     * Gemini
     * Mistral
+    * Outlines (LlamaCpp, SgLang, VLLMOffline)
     """
 
     logit_bias: dict[str, int]
@@ -128,6 +134,7 @@
 
     * OpenAI
     * Groq
+    * Outlines (Transformers, LlamaCpp, VLLMOffline)
     """
 
     stop_sequences: list[str]
@@ -162,6 +169,7 @@
     * OpenAI
     * Anthropic
     * Groq
+    * Outlines (all providers)
     """
 
 
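A hedged sketch of what the new support entries mean in practice: the same ModelSettings values now flow through to the Outlines backends, with anything backend-specific tunneled via extra_body (the min_p kwarg is an illustrative assumption):

from pydantic_ai.settings import ModelSettings

settings = ModelSettings(
    max_tokens=256,   # supported by all Outlines providers
    temperature=0.7,  # Transformers, LlamaCpp, SgLang, VLLMOffline
    seed=42,          # LlamaCpp, VLLMOffline
    extra_body={'min_p': 0.05},  # hypothetical backend-specific kwarg, merged into the inference kwargs
)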
pydantic_ai/toolsets/fastmcp.py ADDED
@@ -0,0 +1,215 @@
+from __future__ import annotations
+
+import base64
+from asyncio import Lock
+from contextlib import AsyncExitStack
+from dataclasses import KW_ONLY, dataclass
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Literal
+
+from pydantic import AnyUrl
+from typing_extensions import Self, assert_never
+
+from pydantic_ai import messages
+from pydantic_ai.exceptions import ModelRetry
+from pydantic_ai.tools import AgentDepsT, RunContext, ToolDefinition
+from pydantic_ai.toolsets import AbstractToolset
+from pydantic_ai.toolsets.abstract import ToolsetTool
+
+try:
+    from fastmcp.client import Client
+    from fastmcp.client.transports import ClientTransport
+    from fastmcp.exceptions import ToolError
+    from fastmcp.mcp_config import MCPConfig
+    from fastmcp.server import FastMCP
+    from mcp.server.fastmcp import FastMCP as FastMCP1Server
+    from mcp.types import (
+        AudioContent,
+        BlobResourceContents,
+        ContentBlock,
+        EmbeddedResource,
+        ImageContent,
+        ResourceLink,
+        TextContent,
+        TextResourceContents,
+        Tool as MCPTool,
+    )
+
+    from pydantic_ai.mcp import TOOL_SCHEMA_VALIDATOR
+
+except ImportError as _import_error:
+    raise ImportError(
+        'Please install the `fastmcp` package to use the FastMCP server, '
+        'you can use the `fastmcp` optional group — `pip install "pydantic-ai-slim[fastmcp]"`'
+    ) from _import_error
+
+
+if TYPE_CHECKING:
+    from fastmcp.client.client import CallToolResult
+
+
+FastMCPToolResult = messages.BinaryContent | dict[str, Any] | str | None
+
+ToolErrorBehavior = Literal['model_retry', 'error']
+
+UNKNOWN_BINARY_MEDIA_TYPE = 'application/octet-stream'
+
+
+@dataclass(init=False)
+class FastMCPToolset(AbstractToolset[AgentDepsT]):
+    """A FastMCP Toolset that uses the FastMCP Client to call tools from a local or remote MCP Server.
+
+    The Toolset can accept a FastMCP Client, a FastMCP Transport, or any other object which a FastMCP Transport can be created from.
+
+    See https://gofastmcp.com/clients/transports for a full list of transports available.
+    """
+
+    client: Client[Any]
+    """The FastMCP client to use."""
+
+    _: KW_ONLY
+
+    tool_error_behavior: Literal['model_retry', 'error']
+    """The behavior to take when a tool error occurs."""
+
+    max_retries: int
+    """The maximum number of retries to attempt if a tool call fails."""
+
+    _id: str | None
+
+    def __init__(
+        self,
+        client: Client[Any]
+        | ClientTransport
+        | FastMCP
+        | FastMCP1Server
+        | AnyUrl
+        | Path
+        | MCPConfig
+        | dict[str, Any]
+        | str,
+        *,
+        max_retries: int = 1,
+        tool_error_behavior: Literal['model_retry', 'error'] = 'model_retry',
+        id: str | None = None,
+    ) -> None:
+        if isinstance(client, Client):
+            self.client = client
+        else:
+            self.client = Client[Any](transport=client)
+
+        self._id = id
+        self.max_retries = max_retries
+        self.tool_error_behavior = tool_error_behavior
+
+        self._enter_lock: Lock = Lock()
+        self._running_count: int = 0
+        self._exit_stack: AsyncExitStack | None = None
+
+    @property
+    def id(self) -> str | None:
+        return self._id
+
+    async def __aenter__(self) -> Self:
+        async with self._enter_lock:
+            if self._running_count == 0:
+                self._exit_stack = AsyncExitStack()
+                await self._exit_stack.enter_async_context(self.client)
+
+            self._running_count += 1
+
+        return self
+
+    async def __aexit__(self, *args: Any) -> bool | None:
+        async with self._enter_lock:
+            self._running_count -= 1
+            if self._running_count == 0 and self._exit_stack:
+                await self._exit_stack.aclose()
+                self._exit_stack = None
+
+        return None
+
+    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
+        async with self:
+            mcp_tools: list[MCPTool] = await self.client.list_tools()
+
+            return {
+                tool.name: _convert_mcp_tool_to_toolset_tool(toolset=self, mcp_tool=tool, retries=self.max_retries)
+                for tool in mcp_tools
+            }
+
+    async def call_tool(
+        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
+    ) -> Any:
+        async with self:
+            try:
+                call_tool_result: CallToolResult = await self.client.call_tool(name=name, arguments=tool_args)
+            except ToolError as e:
+                if self.tool_error_behavior == 'model_retry':
+                    raise ModelRetry(message=str(e)) from e
+                else:
+                    raise e
+
+            # If we have structured content, return that
+            if call_tool_result.structured_content:
+                return call_tool_result.structured_content
+
+            # Otherwise, return the content
+            return _map_fastmcp_tool_results(parts=call_tool_result.content)
+
+
+def _convert_mcp_tool_to_toolset_tool(
+    toolset: FastMCPToolset[AgentDepsT],
+    mcp_tool: MCPTool,
+    retries: int,
+) -> ToolsetTool[AgentDepsT]:
+    """Convert an MCP tool to a toolset tool."""
+    return ToolsetTool[AgentDepsT](
+        tool_def=ToolDefinition(
+            name=mcp_tool.name,
+            description=mcp_tool.description,
+            parameters_json_schema=mcp_tool.inputSchema,
+            metadata={
+                'meta': mcp_tool.meta,
+                'annotations': mcp_tool.annotations.model_dump() if mcp_tool.annotations else None,
+                'output_schema': mcp_tool.outputSchema or None,
+            },
+        ),
+        toolset=toolset,
+        max_retries=retries,
+        args_validator=TOOL_SCHEMA_VALIDATOR,
+    )
+
+
+def _map_fastmcp_tool_results(parts: list[ContentBlock]) -> list[FastMCPToolResult] | FastMCPToolResult:
+    """Map FastMCP tool results to toolset tool results."""
+    mapped_results = [_map_fastmcp_tool_result(part) for part in parts]
+
+    if len(mapped_results) == 1:
+        return mapped_results[0]
+
+    return mapped_results
+
+
+def _map_fastmcp_tool_result(part: ContentBlock) -> FastMCPToolResult:
+    if isinstance(part, TextContent):
+        return part.text
+    elif isinstance(part, ImageContent | AudioContent):
+        return messages.BinaryContent(data=base64.b64decode(part.data), media_type=part.mimeType)
+    elif isinstance(part, EmbeddedResource):
+        if isinstance(part.resource, BlobResourceContents):
+            return messages.BinaryContent(
+                data=base64.b64decode(part.resource.blob),
+                media_type=part.resource.mimeType or UNKNOWN_BINARY_MEDIA_TYPE,
+            )
+        elif isinstance(part.resource, TextResourceContents):
+            return part.resource.text
+        else:
+            assert_never(part.resource)
+    elif isinstance(part, ResourceLink):
+        # ResourceLink is not yet supported by the FastMCP toolset as reading resources is not yet supported.
+        raise NotImplementedError(
+            'ResourceLink is not supported by the FastMCP toolset as reading resources is not yet supported.'
+        )
+    else:
+        assert_never(part)
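A minimal usage sketch of the new toolset; the server script path and the model name are illustrative assumptions:

import asyncio

from pydantic_ai import Agent
from pydantic_ai.toolsets.fastmcp import FastMCPToolset

# Anything a FastMCP transport can be built from works here: a script path,
# a URL, an MCPConfig dict, or an existing fastmcp Client.
toolset = FastMCPToolset('server.py')
agent = Agent('openai:gpt-5', toolsets=[toolset])

async def main():
    async with agent:  # enters the toolset and keeps the MCP session open
        result = await agent.run('Which tools do you have available?')
        print(result.output)

asyncio.run(main())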
pydantic_ai_slim-1.5.0.dist-info/METADATA → pydantic_ai_slim-1.7.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.5.0
+Version: 1.7.0
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.35
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.5.0
+Requires-Dist: pydantic-graph==1.7.0
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,9 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.5.0; extra == 'evals'
+Requires-Dist: pydantic-evals==1.7.0; extra == 'evals'
+Provides-Extra: fastmcp
+Requires-Dist: fastmcp>=2.12.0; extra == 'fastmcp'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.46.0; extra == 'google'
 Provides-Extra: groq
@@ -72,6 +74,20 @@ Provides-Extra: mistral
 Requires-Dist: mistralai>=1.9.10; extra == 'mistral'
 Provides-Extra: openai
 Requires-Dist: openai>=1.107.2; extra == 'openai'
+Provides-Extra: outlines-llamacpp
+Requires-Dist: outlines[llamacpp]<1.3.0,>=1.0.0; extra == 'outlines-llamacpp'
+Provides-Extra: outlines-mlxlm
+Requires-Dist: outlines[mlxlm]<1.3.0,>=1.0.0; extra == 'outlines-mlxlm'
+Provides-Extra: outlines-sglang
+Requires-Dist: outlines[sglang]<1.3.0,>=1.0.0; extra == 'outlines-sglang'
+Requires-Dist: pillow; extra == 'outlines-sglang'
+Provides-Extra: outlines-transformers
+Requires-Dist: outlines[transformers]<1.3.0,>=1.0.0; extra == 'outlines-transformers'
+Requires-Dist: pillow; extra == 'outlines-transformers'
+Requires-Dist: transformers>=4.0.0; extra == 'outlines-transformers'
+Provides-Extra: outlines-vllm-offline
+Requires-Dist: outlines<1.3.0,>=1.0.0; extra == 'outlines-vllm-offline'
+Requires-Dist: vllm; (python_version < '3.12') and extra == 'outlines-vllm-offline'
 Provides-Extra: prefect
 Requires-Dist: prefect>=3.4.21; extra == 'prefect'
 Provides-Extra: retries
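For reference, the new optional groups install like any other extra, e.g. pip install "pydantic-ai-slim[fastmcp]" or pip install "pydantic-ai-slim[outlines-llamacpp]".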
pydantic_ai_slim-1.5.0.dist-info/RECORD → pydantic_ai_slim-1.7.0.dist-info/RECORD CHANGED
@@ -9,7 +9,7 @@ pydantic_ai/_instrumentation.py,sha256=3XJxRUT0m2K6NfpAb-JKro4Rpw-8weqQ_ydtufeKV
 pydantic_ai/_json_schema.py,sha256=Br48srbwCTVIie98a9UEMGcCcTIa3E4zVvCbkxqQRso,7268
 pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
 pydantic_ai/_otel_messages.py,sha256=SsMpbyI1fIISOck_wQcZJPIOei8lOmvwARkdPSCx8y8,1650
-pydantic_ai/_output.py,sha256=gHS1qwM701cH5FGGRUrMxgWlJhY1vNgdM6ylnHRa-Ew,40784
+pydantic_ai/_output.py,sha256=83Imvnwqwr-zveX_I95E24zt2Iqn-ofpd0HsbvOhS70,41274
 pydantic_ai/_parts_manager.py,sha256=05m8q2JZQk9Z8vNKOocxGDJQwYgbUGABGBRnXYJcsg8,19914
 pydantic_ai/_run_context.py,sha256=-ah9Ipf3mLTbvuYqmJSqBmBexaCcED7HGA1Llzs0dKU,2324
 pydantic_ai/_system_prompt.py,sha256=WdDW_DTGHujcFFaK-J7J6mA4ZDJZ0IOKpyizJA-1Y5Q,1142
@@ -28,7 +28,7 @@ pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=sVabgrAJXmj96I7NM-w0RBz1rH5x_zZql1V6epei4JU,26700
 pydantic_ai/retries.py,sha256=QM4oDA9DG-Y2qP06fbCp8Dqq8ups40Rr4HYjAOlbNyM,14650
 pydantic_ai/run.py,sha256=5mOgh7UkLRtCjs1S85NM6OjcWvOy91VQhCkNMQQPhxs,17039
-pydantic_ai/settings.py,sha256=0mr6KudxKKjTG8e3nsv_8vDLxNhu_1-WvefCOzCGSYM,3565
+pydantic_ai/settings.py,sha256=HlQxrw62YsXpIIhhddecYNTquDfhnpfaZU7y1p4CuVs,3935
 pydantic_ai/tools.py,sha256=dCecmJtRkF1ioqFYbfT00XGGqzGB4PPO9n6IrHCQtnc,20343
 pydantic_ai/usage.py,sha256=lhReoVNwqt7mfmWk40A1ddnKk4-MVFJ0qCl_oFdGzxo,16251
 pydantic_ai/agent/__init__.py,sha256=rvVo5Fw78yu5IOVE6ub6tmJTIuDGsIY15D3_KTFXtx4,66525
@@ -75,8 +75,9 @@ pydantic_ai/models/huggingface.py,sha256=iADyoCKYrNyjixr55rEpXW02F-sah4rLmqrThEc
 pydantic_ai/models/instrumented.py,sha256=J8eVTutr3UP1r_wd5sM5c0BIdzkRqT-EGgd2NiF0ssQ,22319
 pydantic_ai/models/mcp_sampling.py,sha256=qY4y4nXbRpNp2QbkfjzWLvF_8KLZGXypz4cc0lYRHXU,3553
 pydantic_ai/models/mistral.py,sha256=fi57hADjYxZw8wEpAcNI6mqY32VG9hHK9GGRQ-9vlZg,33905
-pydantic_ai/models/openai.py,sha256=wQJDGVAPzN5GNzny4ZN0CrnnrPIMxUOXQYfAtK0u7z4,108980
-pydantic_ai/models/test.py,sha256=5ER66nwZG7Iwm-KkzPo4vwNd3rulzgkpgysu4YcT1W4,20568
+pydantic_ai/models/openai.py,sha256=1x2pXZFJfb-LGEweFNzKB77qIOWcVT4hvMEvSwBVVYE,109160
+pydantic_ai/models/outlines.py,sha256=Un4KERT-jW97georXrE3iNuThFiYaYxZjGYHm2-PpD8,24270
+pydantic_ai/models/test.py,sha256=cRiLD1uXKERUkBTyrVj3L5NQHoDrDqL5UU9EG_odkTg,20707
 pydantic_ai/models/wrapper.py,sha256=nwh8Gea59blbr1JDKlUnkYICuI9TUubC4qP7iZRRW28,2440
 pydantic_ai/profiles/__init__.py,sha256=UHknN-CYsQexUaxfsgz_J_uSZ9QwistLSuAErQkvbcM,3385
 pydantic_ai/profiles/amazon.py,sha256=IPa2wydpcbFLLvhDK35-pwwoKo0Pg4vP84823fHx0zc,314
@@ -90,9 +91,9 @@ pydantic_ai/profiles/harmony.py,sha256=HKOQ1QUBd9jLLabO9jMCq97d3pgAzd3Y7c_jiwPFS
 pydantic_ai/profiles/meta.py,sha256=JdZcpdRWx8PY1pU9Z2i_TYtA0Cpbg23xyFrV7eXnooY,309
 pydantic_ai/profiles/mistral.py,sha256=ll01PmcK3szwlTfbaJLQmfd0TADN8lqjov9HpPJzCMQ,217
 pydantic_ai/profiles/moonshotai.py,sha256=e1RJnbEvazE6aJAqfmYLYGNtwNwg52XQDRDkcLrv3fU,272
-pydantic_ai/profiles/openai.py,sha256=MXOsktUqfcF2pBgYJMyFWMZafPJ7tejwyoFM2mjKzaY,9689
+pydantic_ai/profiles/openai.py,sha256=kve8KnvsGguioNi1gQtO7dqF8vvxR8W21fERehb3GPo,10053
 pydantic_ai/profiles/qwen.py,sha256=9SnTpMKndxNQMFyumyaOczJa5JGWbYQdpVKKW4OzKjk,749
-pydantic_ai/providers/__init__.py,sha256=UMgxQqav_-nxZw7oA5pUAlNJV694HwTtvMrv8WgELfI,4872
+pydantic_ai/providers/__init__.py,sha256=Fwpu0w2-NpkKYQkDS2__kaWOR3dMW2KiE9v0K1EKwP4,4985
 pydantic_ai/providers/anthropic.py,sha256=vwNjO2JJ0Ux_3PXI9_XvzNZ24PKessm8z2ja1uzbBwM,3327
 pydantic_ai/providers/azure.py,sha256=PFRykTOfARMdANODnTLq__0ZynX7DlQ35GVf2Qs9VBY,5814
 pydantic_ai/providers/bedrock.py,sha256=bPbz-o3UhDzCRrg5xCrTfluLpDi2Yy9-JiCtC5mCIRk,8539
@@ -116,6 +117,7 @@ pydantic_ai/providers/nebius.py,sha256=nGpgbZnBZgNz4wHTi1vgvc-9tO2_zj5r3vRzEUbhP
 pydantic_ai/providers/ollama.py,sha256=jg48g_3fYsvK8g-V3UOmR9HOsvnvb533BAB-rZZDxdA,4733
 pydantic_ai/providers/openai.py,sha256=cVVf99GgBnYBKYeWKBscvnkoRCu0ctWuKulG19lgWMo,3401
 pydantic_ai/providers/openrouter.py,sha256=o33Fk7kMyMhEM4NcSXU6IuG0cIUc45ySaenozrRypBI,4145
+pydantic_ai/providers/outlines.py,sha256=9Y3bnRKooqeUIVquexf75oGWpj8XOpJ71tBMWp0mTMQ,1251
 pydantic_ai/providers/ovhcloud.py,sha256=qvPB7-hgeClBMeNSKOiTrF-pSp6RczRaqWg5iAeUwss,3428
 pydantic_ai/providers/together.py,sha256=QtIR1BVJjoEYLvsUFpvPe81akx0iQvjYptl87XVpCpo,3441
 pydantic_ai/providers/vercel.py,sha256=AdaRmTejcr4CLPY2X0D3iZ0T4xPdUm4HAXXLS0Q0jMA,4248
@@ -125,14 +127,15 @@ pydantic_ai/toolsets/abstract.py,sha256=CXsDF37JkBWcy9hwrgdBe4gqgocNcPKOFEIvQ7t9
 pydantic_ai/toolsets/approval_required.py,sha256=zyYGEx2VqprLed16OXg1QWr81rnAB0CmAzTeyQJ9A4o,1100
 pydantic_ai/toolsets/combined.py,sha256=LQzm_g6gskiHRUMFDvm88SSrz8OGxbdxyHiKzQrMBNU,4026
 pydantic_ai/toolsets/external.py,sha256=J9mWQm1HLbRCOJwpLBIvUZZGR_ywSB7pz8MrXkRNBoU,1736
+pydantic_ai/toolsets/fastmcp.py,sha256=txkWP0JfF8QXBfn7n6YGiDa01KC6nkQuCrq8TcgMLjc,7202
 pydantic_ai/toolsets/filtered.py,sha256=PSQG9EbBYJpHUEBb_4TGzhjAcQPo5aPKvTuReeoWYtQ,864
 pydantic_ai/toolsets/function.py,sha256=7QNKUddsSehwtM1kC13fVPkswzh2qa63p5wqIgrUFKk,16819
 pydantic_ai/toolsets/prefixed.py,sha256=0KwcDkW8OM36ZUsOLVP5h-Nj2tPq78L3_E2c-1Fbh5s,1426
 pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
 pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
 pydantic_ai/toolsets/wrapper.py,sha256=KRzF1p8dncHbva8CE6Ud-IC5E_aygIHlwH5atXK55k4,1673
-pydantic_ai_slim-1.5.0.dist-info/METADATA,sha256=za8KqhM9ey6DIczK7Q5VaVYGT0BcmNJBc0RSJoZQ5P4,4703
-pydantic_ai_slim-1.5.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydantic_ai_slim-1.5.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
-pydantic_ai_slim-1.5.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
-pydantic_ai_slim-1.5.0.dist-info/RECORD,,
+pydantic_ai_slim-1.7.0.dist-info/METADATA,sha256=GxDZ2iV6RaVqJckEQSAT4GhQcvyLisSlydAsM1CdKNQ,5592
+pydantic_ai_slim-1.7.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-1.7.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-1.7.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-1.7.0.dist-info/RECORD,,