pydantic-ai-slim 1.2.1__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. pydantic_ai/__init__.py +6 -0
  2. pydantic_ai/_agent_graph.py +67 -20
  3. pydantic_ai/_cli.py +2 -2
  4. pydantic_ai/_output.py +20 -12
  5. pydantic_ai/_run_context.py +6 -2
  6. pydantic_ai/_utils.py +26 -8
  7. pydantic_ai/ag_ui.py +50 -696
  8. pydantic_ai/agent/__init__.py +13 -25
  9. pydantic_ai/agent/abstract.py +146 -9
  10. pydantic_ai/builtin_tools.py +106 -4
  11. pydantic_ai/direct.py +16 -4
  12. pydantic_ai/durable_exec/dbos/_agent.py +3 -0
  13. pydantic_ai/durable_exec/prefect/_agent.py +3 -0
  14. pydantic_ai/durable_exec/temporal/__init__.py +11 -0
  15. pydantic_ai/durable_exec/temporal/_agent.py +3 -0
  16. pydantic_ai/durable_exec/temporal/_function_toolset.py +23 -72
  17. pydantic_ai/durable_exec/temporal/_mcp_server.py +30 -30
  18. pydantic_ai/durable_exec/temporal/_run_context.py +7 -2
  19. pydantic_ai/durable_exec/temporal/_toolset.py +67 -3
  20. pydantic_ai/exceptions.py +6 -1
  21. pydantic_ai/mcp.py +1 -22
  22. pydantic_ai/messages.py +46 -8
  23. pydantic_ai/models/__init__.py +87 -38
  24. pydantic_ai/models/anthropic.py +132 -11
  25. pydantic_ai/models/bedrock.py +4 -4
  26. pydantic_ai/models/cohere.py +0 -7
  27. pydantic_ai/models/gemini.py +9 -2
  28. pydantic_ai/models/google.py +26 -23
  29. pydantic_ai/models/groq.py +13 -5
  30. pydantic_ai/models/huggingface.py +2 -2
  31. pydantic_ai/models/openai.py +251 -52
  32. pydantic_ai/models/outlines.py +563 -0
  33. pydantic_ai/models/test.py +6 -3
  34. pydantic_ai/profiles/openai.py +7 -0
  35. pydantic_ai/providers/__init__.py +25 -12
  36. pydantic_ai/providers/anthropic.py +2 -2
  37. pydantic_ai/providers/bedrock.py +60 -16
  38. pydantic_ai/providers/gateway.py +60 -72
  39. pydantic_ai/providers/google.py +91 -24
  40. pydantic_ai/providers/openrouter.py +3 -0
  41. pydantic_ai/providers/outlines.py +40 -0
  42. pydantic_ai/providers/ovhcloud.py +95 -0
  43. pydantic_ai/result.py +173 -8
  44. pydantic_ai/run.py +40 -24
  45. pydantic_ai/settings.py +8 -0
  46. pydantic_ai/tools.py +10 -6
  47. pydantic_ai/toolsets/fastmcp.py +215 -0
  48. pydantic_ai/ui/__init__.py +16 -0
  49. pydantic_ai/ui/_adapter.py +386 -0
  50. pydantic_ai/ui/_event_stream.py +591 -0
  51. pydantic_ai/ui/_messages_builder.py +28 -0
  52. pydantic_ai/ui/ag_ui/__init__.py +9 -0
  53. pydantic_ai/ui/ag_ui/_adapter.py +187 -0
  54. pydantic_ai/ui/ag_ui/_event_stream.py +236 -0
  55. pydantic_ai/ui/ag_ui/app.py +148 -0
  56. pydantic_ai/ui/vercel_ai/__init__.py +16 -0
  57. pydantic_ai/ui/vercel_ai/_adapter.py +199 -0
  58. pydantic_ai/ui/vercel_ai/_event_stream.py +187 -0
  59. pydantic_ai/ui/vercel_ai/_utils.py +16 -0
  60. pydantic_ai/ui/vercel_ai/request_types.py +275 -0
  61. pydantic_ai/ui/vercel_ai/response_types.py +230 -0
  62. pydantic_ai/usage.py +13 -2
  63. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/METADATA +23 -5
  64. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/RECORD +67 -49
  65. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/WHEEL +0 -0
  66. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/entry_points.txt +0 -0
  67. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/models/google.py

@@ -37,7 +37,7 @@ from ..messages import (
     VideoUrl,
 )
 from ..profiles import ModelProfileSpec
-from ..providers import Provider
+from ..providers import Provider, infer_provider
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from . import (
@@ -85,8 +85,6 @@ try:
         UrlContextDict,
         VideoMetadataDict,
     )
-
-    from ..providers.google import GoogleProvider
 except ImportError as _import_error:
     raise ImportError(
         'Please install `google-genai` to use the Google model, '
@@ -128,6 +126,8 @@ _FINISH_REASON_MAP: dict[GoogleFinishReason, FinishReason | None] = {
     GoogleFinishReason.MALFORMED_FUNCTION_CALL: 'error',
     GoogleFinishReason.IMAGE_SAFETY: 'content_filter',
     GoogleFinishReason.UNEXPECTED_TOOL_CALL: 'error',
+    GoogleFinishReason.IMAGE_PROHIBITED_CONTENT: 'content_filter',
+    GoogleFinishReason.NO_IMAGE: 'error',
 }

@@ -187,7 +187,7 @@ class GoogleModel(Model):
         self,
         model_name: GoogleModelName,
         *,
-        provider: Literal['google-gla', 'google-vertex'] | Provider[Client] = 'google-gla',
+        provider: Literal['google-gla', 'google-vertex', 'gateway'] | Provider[Client] = 'google-gla',
         profile: ModelProfileSpec | None = None,
         settings: ModelSettings | None = None,
     ):
@@ -196,15 +196,15 @@ class GoogleModel(Model):
         Args:
             model_name: The name of the model to use.
             provider: The provider to use for authentication and API access. Can be either the string
-                'google-gla' or 'google-vertex' or an instance of `Provider[httpx.AsyncClient]`.
-                If not provided, a new provider will be created using the other parameters.
+                'google-gla' or 'google-vertex' or an instance of `Provider[google.genai.AsyncClient]`.
+                Defaults to 'google-gla'.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
             settings: The model settings to use. Defaults to None.
         """
         self._model_name = model_name

         if isinstance(provider, str):
-            provider = GoogleProvider(vertexai=provider == 'google-vertex')
+            provider = infer_provider('gateway/google-vertex' if provider == 'gateway' else provider)
         self._provider = provider
         self.client = provider.client
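
With this change, passing the string 'gateway' routes requests through the `gateway/google-vertex` provider via `infer_provider`. A minimal usage sketch; the model name is illustrative, and gateway credentials are assumed to be configured in the environment:

```python
from pydantic_ai import Agent
from pydantic_ai.models.google import GoogleModel

# 'gateway' is rewritten to 'gateway/google-vertex' before provider inference.
model = GoogleModel('gemini-2.5-flash', provider='gateway')
agent = Agent(model)
```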
 
@@ -455,23 +455,26 @@ class GoogleModel(Model):
     def _process_response(self, response: GenerateContentResponse) -> ModelResponse:
         if not response.candidates:
             raise UnexpectedModelBehavior('Expected at least one candidate in Gemini response')  # pragma: no cover
+
         candidate = response.candidates[0]
-        if candidate.content is None or candidate.content.parts is None:
-            if candidate.finish_reason == 'SAFETY':
-                raise UnexpectedModelBehavior('Safety settings triggered', str(response))
-            else:
-                raise UnexpectedModelBehavior(
-                    'Content field missing from Gemini response', str(response)
-                )  # pragma: no cover
-        parts = candidate.content.parts or []

         vendor_id = response.response_id
         vendor_details: dict[str, Any] | None = None
         finish_reason: FinishReason | None = None
-        if raw_finish_reason := candidate.finish_reason:  # pragma: no branch
+        raw_finish_reason = candidate.finish_reason
+        if raw_finish_reason:  # pragma: no branch
             vendor_details = {'finish_reason': raw_finish_reason.value}
             finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason)

+        if candidate.content is None or candidate.content.parts is None:
+            if finish_reason == 'content_filter' and raw_finish_reason:
+                raise UnexpectedModelBehavior(
+                    f'Content filter {raw_finish_reason.value!r} triggered', response.model_dump_json()
+                )
+            parts = []  # pragma: no cover
+        else:
+            parts = candidate.content.parts or []
+
         usage = _metadata_as_usage(response)
         return _process_response_from_parts(
             parts,
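
The net effect: when a candidate arrives without content, the raw finish reason is now mapped through `_FINISH_REASON_MAP` first, so any reason the map classifies as 'content_filter' (such as IMAGE_SAFETY, or the newly added IMAGE_PROHIBITED_CONTENT) raises with the exact reason name, while other reasons yield an empty parts list instead of an error. A self-contained sketch of that control flow, with stand-in types rather than the library's own:

```python
from enum import Enum

class RawReason(Enum):  # stand-in for google.genai's finish-reason enum
    STOP = 'STOP'
    IMAGE_PROHIBITED_CONTENT = 'IMAGE_PROHIBITED_CONTENT'

REASON_MAP = {
    RawReason.STOP: 'stop',
    RawReason.IMAGE_PROHIBITED_CONTENT: 'content_filter',
}

def extract_parts(parts: list | None, raw: RawReason | None) -> list:
    finish_reason = REASON_MAP.get(raw) if raw else None
    if parts is None:
        if finish_reason == 'content_filter' and raw:
            raise RuntimeError(f'Content filter {raw.value!r} triggered')
        return []  # e.g. a bare STOP chunk with no content
    return parts
```
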
@@ -625,7 +628,8 @@ class GeminiStreamedResponse(StreamedResponse):
                 if chunk.response_id:  # pragma: no branch
                     self.provider_response_id = chunk.response_id

-                if raw_finish_reason := candidate.finish_reason:
+                raw_finish_reason = candidate.finish_reason
+                if raw_finish_reason:
                     self.provider_details = {'finish_reason': raw_finish_reason.value}
                     self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason)

@@ -643,13 +647,12 @@ class GeminiStreamedResponse(StreamedResponse):
                 # )

                 if candidate.content is None or candidate.content.parts is None:
-                    if candidate.finish_reason == 'STOP':  # pragma: no cover
-                        # Normal completion - skip this chunk
-                        continue
-                    elif candidate.finish_reason == 'SAFETY':  # pragma: no cover
-                        raise UnexpectedModelBehavior('Safety settings triggered', str(chunk))
+                    if self.finish_reason == 'content_filter' and raw_finish_reason:  # pragma: no cover
+                        raise UnexpectedModelBehavior(
+                            f'Content filter {raw_finish_reason.value!r} triggered', chunk.model_dump_json()
+                        )
                     else:  # pragma: no cover
-                        raise UnexpectedModelBehavior('Content field missing from streaming Gemini response', str(chunk))
+                        continue

                 parts = candidate.content.parts
                 if not parts:
pydantic_ai/models/groq.py

@@ -141,7 +141,7 @@ class GroqModel(Model):
         self,
         model_name: GroqModelName,
         *,
-        provider: Literal['groq'] | Provider[AsyncGroq] = 'groq',
+        provider: Literal['groq', 'gateway'] | Provider[AsyncGroq] = 'groq',
         profile: ModelProfileSpec | None = None,
         settings: ModelSettings | None = None,
     ):
@@ -159,7 +159,7 @@ class GroqModel(Model):
         self._model_name = model_name

         if isinstance(provider, str):
-            provider = infer_provider(provider)
+            provider = infer_provider('gateway/groq' if provider == 'gateway' else provider)
         self._provider = provider
         self.client = provider.client
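
GroqModel gains the same shorthand, with 'gateway' resolving to the `gateway/groq` provider. A sketch under the same assumptions (illustrative model name, gateway credentials in the environment):

```python
from pydantic_ai.models.groq import GroqModel

# 'gateway' is rewritten to 'gateway/groq' before provider inference.
model = GroqModel('llama-3.3-70b-versatile', provider='gateway')
```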
 
@@ -330,7 +330,7 @@ class GroqModel(Model):
         if call_part and return_part:  # pragma: no branch
             items.append(call_part)
             items.append(return_part)
-        if choice.message.content is not None:
+        if choice.message.content:
             # NOTE: The `<think>` tag is only present if `groq_reasoning_format` is set to `raw`.
             items.extend(split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags))
         if choice.message.tool_calls is not None:
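
The guard change from `is not None` to plain truthiness (repeated in the streaming and Hugging Face hunks below) matters because an empty string passes the old check but carries nothing worth splitting into text and thinking parts. A quick illustration of the difference:

```python
# '' is not None but is falsy: the old guard admitted empty content,
# the new one skips '' and None alike.
for content in ('hello', '', None):
    print(repr(content), content is not None, bool(content))
# 'hello' True True
# ''      True False
# None    False False
```
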
@@ -524,6 +524,8 @@ class GroqStreamedResponse(StreamedResponse):
     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
         try:
             executed_tool_call_id: str | None = None
+            reasoning_index = 0
+            reasoning = False
             async for chunk in self._response:
                 self._usage += _map_usage(chunk)

@@ -540,10 +542,16 @@ class GroqStreamedResponse(StreamedResponse):
540
542
  self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason)
541
543
 
542
544
  if choice.delta.reasoning is not None:
545
+ if not reasoning:
546
+ reasoning_index += 1
547
+ reasoning = True
548
+
543
549
  # NOTE: The `reasoning` field is only present if `groq_reasoning_format` is set to `parsed`.
544
550
  yield self._parts_manager.handle_thinking_delta(
545
- vendor_part_id='reasoning', content=choice.delta.reasoning
551
+ vendor_part_id=f'reasoning-{reasoning_index}', content=choice.delta.reasoning
546
552
  )
553
+ else:
554
+ reasoning = False
547
555
 
548
556
  if choice.delta.executed_tools:
549
557
  for tool in choice.delta.executed_tools:
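
The new counter gives each contiguous burst of reasoning its own `vendor_part_id`, so reasoning that resumes after other deltas starts a fresh thinking part instead of being appended to the previous one. A standalone simulation of the bookkeeping above:

```python
# None models a chunk whose delta carries no reasoning.
deltas = ['step 1...', 'still step 1...', None, 'step 2...']
reasoning_index, reasoning, ids = 0, False, []
for delta in deltas:
    if delta is not None:
        if not reasoning:
            reasoning_index += 1  # a new burst begins
            reasoning = True
        ids.append(f'reasoning-{reasoning_index}')
    else:
        reasoning = False  # burst interrupted
assert ids == ['reasoning-1', 'reasoning-1', 'reasoning-2']
```
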
@@ -563,7 +571,7 @@

                 # Handle the text part of the response
                 content = choice.delta.content
-                if content is not None:
+                if content:
                     maybe_event = self._parts_manager.handle_text_delta(
                         vendor_part_id='content',
                         content=content,
pydantic_ai/models/huggingface.py

@@ -277,7 +277,7 @@ class HuggingFaceModel(Model):

         items: list[ModelResponsePart] = []

-        if content is not None:
+        if content:
             items.extend(split_content_into_text_and_thinking(content, self.profile.thinking_tags))
         if tool_calls is not None:
             for c in tool_calls:
@@ -482,7 +482,7 @@ class HuggingFaceStreamedResponse(StreamedResponse):

             # Handle the text part of the response
             content = choice.delta.content
-            if content is not None:
+            if content:
                 maybe_event = self._parts_manager.handle_text_delta(
                     vendor_part_id='content',
                     content=content,