pydantic-ai-slim 1.0.14__py3-none-any.whl → 1.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic. Click here for more details.

Files changed (38)
  1. pydantic_ai/__init__.py +19 -1
  2. pydantic_ai/_agent_graph.py +116 -93
  3. pydantic_ai/_cli.py +4 -7
  4. pydantic_ai/_output.py +236 -192
  5. pydantic_ai/_parts_manager.py +8 -42
  6. pydantic_ai/_tool_manager.py +9 -16
  7. pydantic_ai/agent/abstract.py +169 -1
  8. pydantic_ai/builtin_tools.py +82 -0
  9. pydantic_ai/direct.py +7 -0
  10. pydantic_ai/durable_exec/dbos/_agent.py +106 -3
  11. pydantic_ai/durable_exec/temporal/_agent.py +123 -6
  12. pydantic_ai/durable_exec/temporal/_model.py +8 -0
  13. pydantic_ai/format_prompt.py +4 -3
  14. pydantic_ai/mcp.py +20 -10
  15. pydantic_ai/messages.py +149 -3
  16. pydantic_ai/models/__init__.py +15 -1
  17. pydantic_ai/models/anthropic.py +7 -3
  18. pydantic_ai/models/cohere.py +4 -0
  19. pydantic_ai/models/function.py +7 -4
  20. pydantic_ai/models/gemini.py +8 -0
  21. pydantic_ai/models/google.py +56 -23
  22. pydantic_ai/models/groq.py +11 -5
  23. pydantic_ai/models/huggingface.py +5 -3
  24. pydantic_ai/models/mistral.py +6 -8
  25. pydantic_ai/models/openai.py +197 -57
  26. pydantic_ai/models/test.py +4 -0
  27. pydantic_ai/output.py +5 -2
  28. pydantic_ai/profiles/__init__.py +2 -0
  29. pydantic_ai/profiles/google.py +5 -2
  30. pydantic_ai/profiles/openai.py +2 -1
  31. pydantic_ai/result.py +46 -30
  32. pydantic_ai/run.py +35 -7
  33. pydantic_ai/usage.py +5 -4
  34. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/METADATA +3 -3
  35. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/RECORD +38 -38
  36. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/WHEEL +0 -0
  37. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/entry_points.txt +0 -0
  38. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/licenses/LICENSE +0 -0
@@ -21,6 +21,7 @@ from ..messages import (
21
21
  BinaryContent,
22
22
  BuiltinToolCallPart,
23
23
  BuiltinToolReturnPart,
24
+ FilePart,
24
25
  FileUrl,
25
26
  ModelMessage,
26
27
  ModelRequest,
@@ -46,7 +47,11 @@ LatestGeminiModelNames = Literal[
46
47
  'gemini-2.0-flash',
47
48
  'gemini-2.0-flash-lite',
48
49
  'gemini-2.5-flash',
50
+ 'gemini-2.5-flash-preview-09-2025',
49
51
  'gemini-2.5-flash-lite',
52
+ 'gemini-2.5-flash-lite-preview-09-2025',
53
+ 'gemini-flash-latest',
54
+ 'gemini-flash-lite-latest',
50
55
  'gemini-2.5-pro',
51
56
  ]
52
57
  """Latest Gemini models."""
@@ -628,6 +633,9 @@ def _content_model_response(m: ModelResponse) -> _GeminiContent:
628
633
  elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart): # pragma: no cover
629
634
  # This is currently never returned from gemini
630
635
  pass
636
+ elif isinstance(item, FilePart): # pragma: no cover
637
+ # Files generated by models are not sent back to models that don't themselves generate files.
638
+ pass
631
639
  else:
632
640
  assert_never(item)
633
641
  return _GeminiContent(role='model', parts=parts)
@@ -13,12 +13,13 @@ from typing_extensions import assert_never
13
13
  from .. import UnexpectedModelBehavior, _utils, usage
14
14
  from .._output import OutputObjectDefinition
15
15
  from .._run_context import RunContext
16
- from ..builtin_tools import CodeExecutionTool, UrlContextTool, WebSearchTool
16
+ from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, UrlContextTool, WebSearchTool
17
17
  from ..exceptions import UserError
18
18
  from ..messages import (
19
19
  BinaryContent,
20
20
  BuiltinToolCallPart,
21
21
  BuiltinToolReturnPart,
22
+ FilePart,
22
23
  FileUrl,
23
24
  FinishReason,
24
25
  ModelMessage,
@@ -95,7 +96,11 @@ LatestGoogleModelNames = Literal[
95
96
  'gemini-2.0-flash',
96
97
  'gemini-2.0-flash-lite',
97
98
  'gemini-2.5-flash',
99
+ 'gemini-2.5-flash-preview-09-2025',
100
+ 'gemini-flash-latest',
98
101
  'gemini-2.5-flash-lite',
102
+ 'gemini-2.5-flash-lite-preview-09-2025',
103
+ 'gemini-flash-lite-latest',
99
104
  'gemini-2.5-pro',
100
105
  ]
101
106
  """Latest Gemini models."""
@@ -308,6 +313,11 @@ class GoogleModel(Model):
308
313
  yield await self._process_streamed_response(response, model_request_parameters) # type: ignore
309
314
 
310
315
  def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ToolDict] | None:
316
+ tools: list[ToolDict] = [
317
+ ToolDict(function_declarations=[_function_declaration_from_tool(t)])
318
+ for t in model_request_parameters.tool_defs.values()
319
+ ]
320
+
311
321
  if model_request_parameters.builtin_tools:
312
322
  if model_request_parameters.output_tools:
313
323
  raise UserError(
@@ -316,21 +326,22 @@ class GoogleModel(Model):
316
326
  if model_request_parameters.function_tools:
317
327
  raise UserError('Gemini does not support user tools and built-in tools at the same time.')
318
328
 
319
- tools: list[ToolDict] = [
320
- ToolDict(function_declarations=[_function_declaration_from_tool(t)])
321
- for t in model_request_parameters.tool_defs.values()
322
- ]
323
- for tool in model_request_parameters.builtin_tools:
324
- if isinstance(tool, WebSearchTool):
325
- tools.append(ToolDict(google_search=GoogleSearchDict()))
326
- elif isinstance(tool, UrlContextTool):
327
- tools.append(ToolDict(url_context=UrlContextDict()))
328
- elif isinstance(tool, CodeExecutionTool): # pragma: no branch
329
- tools.append(ToolDict(code_execution=ToolCodeExecutionDict()))
330
- else: # pragma: no cover
331
- raise UserError(
332
- f'`{tool.__class__.__name__}` is not supported by `GoogleModel`. If it should be, please file an issue.'
333
- )
329
+ for tool in model_request_parameters.builtin_tools:
330
+ if isinstance(tool, WebSearchTool):
331
+ tools.append(ToolDict(google_search=GoogleSearchDict()))
332
+ elif isinstance(tool, UrlContextTool):
333
+ tools.append(ToolDict(url_context=UrlContextDict()))
334
+ elif isinstance(tool, CodeExecutionTool):
335
+ tools.append(ToolDict(code_execution=ToolCodeExecutionDict()))
336
+ elif isinstance(tool, ImageGenerationTool): # pragma: no branch
337
+ if not self.profile.supports_image_output:
338
+ raise UserError(
339
+ "`ImageGenerationTool` is not supported by this model. Use a model with 'image' in the name instead."
340
+ )
341
+ else: # pragma: no cover
342
+ raise UserError(
343
+ f'`{tool.__class__.__name__}` is not supported by `GoogleModel`. If it should be, please file an issue.'
344
+ )
334
345
  return tools or None
335
346
 
336
347
  def _get_tool_config(
@@ -382,6 +393,9 @@ class GoogleModel(Model):
382
393
  model_request_parameters: ModelRequestParameters,
383
394
  ) -> tuple[list[ContentUnionDict], GenerateContentConfigDict]:
384
395
  tools = self._get_tools(model_request_parameters)
396
+ if tools and not self.profile.supports_tools:
397
+ raise UserError('Tools are not supported by this model.')
398
+
385
399
  response_mime_type = None
386
400
  response_schema = None
387
401
  if model_request_parameters.output_mode == 'native':
@@ -394,6 +408,8 @@ class GoogleModel(Model):
394
408
  assert output_object is not None
395
409
  response_schema = self._map_response_schema(output_object)
396
410
  elif model_request_parameters.output_mode == 'prompted' and not tools:
411
+ if not self.profile.supports_json_object_output:
412
+ raise UserError('JSON output is not supported by this model.')
397
413
  response_mime_type = 'application/json'
398
414
 
399
415
  tool_config = self._get_tool_config(model_request_parameters, tools)
@@ -615,8 +631,8 @@ class GeminiStreamedResponse(StreamedResponse):
615
631
  # candidate.grounding_metadata, self.provider_name
616
632
  # )
617
633
  # if web_search_call and web_search_return:
618
- # yield self._parts_manager.handle_builtin_tool_call_part(vendor_part_id=uuid4(), part=web_search_call)
619
- # yield self._parts_manager.handle_builtin_tool_return_part(
634
+ # yield self._parts_manager.handle_part(vendor_part_id=uuid4(), part=web_search_call)
635
+ # yield self._parts_manager.handle_part(
620
636
  # vendor_part_id=uuid4(), part=web_search_return
621
637
  # )
622
638
 
@@ -658,9 +674,18 @@ class GeminiStreamedResponse(StreamedResponse):
658
674
  )
659
675
  if maybe_event is not None: # pragma: no branch
660
676
  yield maybe_event
677
+ elif part.inline_data is not None:
678
+ data = part.inline_data.data
679
+ mime_type = part.inline_data.mime_type
680
+ assert data and mime_type, 'Inline data must have data and mime type'
681
+ content = BinaryContent(data=data, media_type=mime_type)
682
+ yield self._parts_manager.handle_part(
683
+ vendor_part_id=uuid4(),
684
+ part=FilePart(content=BinaryContent.narrow_type(content)),
685
+ )
661
686
  elif part.executable_code is not None:
662
687
  code_execution_tool_call_id = _utils.generate_tool_call_id()
663
- yield self._parts_manager.handle_builtin_tool_call_part(
688
+ yield self._parts_manager.handle_part(
664
689
  vendor_part_id=uuid4(),
665
690
  part=_map_executable_code(
666
691
  part.executable_code, self.provider_name, code_execution_tool_call_id
@@ -668,7 +693,7 @@ class GeminiStreamedResponse(StreamedResponse):
668
693
  )
669
694
  elif part.code_execution_result is not None:
670
695
  assert code_execution_tool_call_id is not None
671
- yield self._parts_manager.handle_builtin_tool_return_part(
696
+ yield self._parts_manager.handle_part(
672
697
  vendor_part_id=uuid4(),
673
698
  part=_map_code_execution_result(
674
699
  part.code_execution_result, self.provider_name, code_execution_tool_call_id
@@ -729,6 +754,10 @@ def _content_model_response(m: ModelResponse, provider_name: str) -> ContentDict
729
754
  elif item.tool_name == WebSearchTool.kind:
730
755
  # Web search results are not sent back
731
756
  pass
757
+ elif isinstance(item, FilePart):
758
+ content = item.content
759
+ inline_data_dict: BlobDict = {'data': content.data, 'mime_type': content.media_type}
760
+ part['inline_data'] = inline_data_dict
732
761
  else:
733
762
  assert_never(item)
734
763
 
@@ -784,10 +813,14 @@ def _process_response_from_parts(
784
813
  item = ToolCallPart(tool_name=part.function_call.name, args=part.function_call.args)
785
814
  if part.function_call.id is not None:
786
815
  item.tool_call_id = part.function_call.id # pragma: no cover
816
+ elif inline_data := part.inline_data:
817
+ data = inline_data.data
818
+ mime_type = inline_data.mime_type
819
+ assert data and mime_type, 'Inline data must have data and mime type'
820
+ content = BinaryContent(data=data, media_type=mime_type)
821
+ item = FilePart(content=BinaryContent.narrow_type(content))
787
822
  else: # pragma: no cover
788
- raise UnexpectedModelBehavior(
789
- f'Unsupported response from Gemini, expected all parts to be function calls, text, or thoughts, got: {part!r}'
790
- )
823
+ raise UnexpectedModelBehavior(f'Unsupported response from Gemini: {part!r}')
791
824
 
792
825
  items.append(item)
793
826
  return ModelResponse(
@@ -1,6 +1,5 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
- import base64
4
3
  from collections.abc import AsyncIterable, AsyncIterator, Iterable
5
4
  from contextlib import asynccontextmanager
6
5
  from dataclasses import dataclass, field
@@ -23,6 +22,7 @@ from ..messages import (
23
22
  BuiltinToolCallPart,
24
23
  BuiltinToolReturnPart,
25
24
  DocumentUrl,
25
+ FilePart,
26
26
  FinishReason,
27
27
  ImageUrl,
28
28
  ModelMessage,
@@ -117,6 +117,10 @@ class GroqModelSettings(ModelSettings, total=False):
117
117
  # ALL FIELDS MUST BE `groq_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
118
118
 
119
119
  groq_reasoning_format: Literal['hidden', 'raw', 'parsed']
120
+ """The format of the reasoning output.
121
+
122
+ See [the Groq docs](https://console.groq.com/docs/reasoning#reasoning-format) for more details.
123
+ """
120
124
 
121
125
 
122
126
  @dataclass(init=False)
@@ -404,6 +408,9 @@ class GroqModel(Model):
404
408
  elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart): # pragma: no cover
405
409
  # These are not currently sent back
406
410
  pass
411
+ elif isinstance(item, FilePart): # pragma: no cover
412
+ # Files generated by models are not sent back to models that don't themselves generate files.
413
+ pass
407
414
  else:
408
415
  assert_never(item)
409
416
  message_param = chat.ChatCompletionAssistantMessageParam(role='assistant')
@@ -491,9 +498,8 @@ class GroqModel(Model):
491
498
  image_url = ImageURL(url=item.url)
492
499
  content.append(chat.ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
493
500
  elif isinstance(item, BinaryContent):
494
- base64_encoded = base64.b64encode(item.data).decode('utf-8')
495
501
  if item.is_image:
496
- image_url = ImageURL(url=f'data:{item.media_type};base64,{base64_encoded}')
502
+ image_url = ImageURL(url=item.data_uri)
497
503
  content.append(chat.ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
498
504
  else:
499
505
  raise RuntimeError('Only images are supported for binary content in Groq.')
@@ -546,12 +552,12 @@ class GroqStreamedResponse(StreamedResponse):
546
552
  )
547
553
  if call_part:
548
554
  executed_tool_call_id = call_part.tool_call_id
549
- yield self._parts_manager.handle_builtin_tool_call_part(
555
+ yield self._parts_manager.handle_part(
550
556
  vendor_part_id=f'executed_tools-{tool.index}-call', part=call_part
551
557
  )
552
558
  if return_part:
553
559
  executed_tool_call_id = None
554
- yield self._parts_manager.handle_builtin_tool_return_part(
560
+ yield self._parts_manager.handle_part(
555
561
  vendor_part_id=f'executed_tools-{tool.index}-return', part=return_part
556
562
  )
557
563
 
@@ -1,6 +1,5 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
- import base64
4
3
  from collections.abc import AsyncIterable, AsyncIterator
5
4
  from contextlib import asynccontextmanager
6
5
  from dataclasses import dataclass, field
@@ -20,6 +19,7 @@ from ..messages import (
20
19
  BuiltinToolCallPart,
21
20
  BuiltinToolReturnPart,
22
21
  DocumentUrl,
22
+ FilePart,
23
23
  FinishReason,
24
24
  ImageUrl,
25
25
  ModelMessage,
@@ -344,6 +344,9 @@ class HuggingFaceModel(Model):
344
344
  elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart): # pragma: no cover
345
345
  # This is currently never returned from huggingface
346
346
  pass
347
+ elif isinstance(item, FilePart): # pragma: no cover
348
+ # Files generated by models are not sent back to models that don't themselves generate files.
349
+ pass
347
350
  else:
348
351
  assert_never(item)
349
352
  message_param = ChatCompletionInputMessage(role='assistant') # type: ignore
@@ -433,9 +436,8 @@ class HuggingFaceModel(Model):
433
436
  url = ChatCompletionInputURL(url=item.url) # type: ignore
434
437
  content.append(ChatCompletionInputMessageChunk(type='image_url', image_url=url)) # type: ignore
435
438
  elif isinstance(item, BinaryContent):
436
- base64_encoded = base64.b64encode(item.data).decode('utf-8')
437
439
  if item.is_image:
438
- url = ChatCompletionInputURL(url=f'data:{item.media_type};base64,{base64_encoded}') # type: ignore
440
+ url = ChatCompletionInputURL(url=item.data_uri) # type: ignore
439
441
  content.append(ChatCompletionInputMessageChunk(type='image_url', image_url=url)) # type: ignore
440
442
  else: # pragma: no cover
441
443
  raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
@@ -1,6 +1,5 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
- import base64
4
3
  from collections.abc import AsyncIterable, AsyncIterator, Iterable
5
4
  from contextlib import asynccontextmanager
6
5
  from dataclasses import dataclass, field
@@ -20,6 +19,7 @@ from ..messages import (
20
19
  BuiltinToolCallPart,
21
20
  BuiltinToolReturnPart,
22
21
  DocumentUrl,
22
+ FilePart,
23
23
  FinishReason,
24
24
  ImageUrl,
25
25
  ModelMessage,
@@ -544,6 +544,9 @@ class MistralModel(Model):
544
544
  elif isinstance(part, BuiltinToolCallPart | BuiltinToolReturnPart): # pragma: no cover
545
545
  # This is currently never returned from mistral
546
546
  pass
547
+ elif isinstance(part, FilePart): # pragma: no cover
548
+ # Files generated by models are not sent back to models that don't themselves generate files.
549
+ pass
547
550
  else:
548
551
  assert_never(part)
549
552
  if thinking_chunks:
@@ -580,16 +583,11 @@ class MistralModel(Model):
580
583
  elif isinstance(item, ImageUrl):
581
584
  content.append(MistralImageURLChunk(image_url=MistralImageURL(url=item.url)))
582
585
  elif isinstance(item, BinaryContent):
583
- base64_encoded = base64.b64encode(item.data).decode('utf-8')
584
586
  if item.is_image:
585
- image_url = MistralImageURL(url=f'data:{item.media_type};base64,{base64_encoded}')
587
+ image_url = MistralImageURL(url=item.data_uri)
586
588
  content.append(MistralImageURLChunk(image_url=image_url, type='image_url'))
587
589
  elif item.media_type == 'application/pdf':
588
- content.append(
589
- MistralDocumentURLChunk(
590
- document_url=f'data:application/pdf;base64,{base64_encoded}', type='document_url'
591
- )
592
- )
590
+ content.append(MistralDocumentURLChunk(document_url=item.data_uri, type='document_url'))
593
591
  else:
594
592
  raise RuntimeError('BinaryContent other than image or PDF is not supported in Mistral.')
595
593
  elif isinstance(item, DocumentUrl):