pydantic-ai-slim 0.7.4__py3-none-any.whl → 0.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

pydantic_ai/_otel_messages.py ADDED

@@ -0,0 +1,67 @@
+ """Type definitions of OpenTelemetry GenAI spec message parts.
+
+ Based on https://github.com/lmolkova/semantic-conventions/blob/eccd1f806e426a32c98271c3ce77585492d26de2/docs/gen-ai/non-normative/models.ipynb
+ """
+
+ from __future__ import annotations
+
+ from typing import Literal
+
+ from pydantic import JsonValue
+ from typing_extensions import NotRequired, TypeAlias, TypedDict
+
+
+ class TextPart(TypedDict):
+     type: Literal['text']
+     content: NotRequired[str]
+
+
+ class ToolCallPart(TypedDict):
+     type: Literal['tool_call']
+     id: str
+     name: str
+     arguments: NotRequired[JsonValue]
+
+
+ class ToolCallResponsePart(TypedDict):
+     type: Literal['tool_call_response']
+     id: str
+     name: str
+     result: NotRequired[JsonValue]
+
+
+ class MediaUrlPart(TypedDict):
+     type: Literal['image-url', 'audio-url', 'video-url', 'document-url']
+     url: NotRequired[str]
+
+
+ class BinaryDataPart(TypedDict):
+     type: Literal['binary']
+     media_type: str
+     content: NotRequired[str]
+
+
+ class ThinkingPart(TypedDict):
+     type: Literal['thinking']
+     content: NotRequired[str]
+
+
+ MessagePart: TypeAlias = 'TextPart | ToolCallPart | ToolCallResponsePart | MediaUrlPart | BinaryDataPart | ThinkingPart'
+
+
+ Role = Literal['system', 'user', 'assistant']
+
+
+ class ChatMessage(TypedDict):
+     role: Role
+     parts: list[MessagePart]
+
+
+ InputMessages: TypeAlias = list[ChatMessage]
+
+
+ class OutputMessage(ChatMessage):
+     finish_reason: NotRequired[str]
+
+
+ OutputMessages: TypeAlias = list[OutputMessage]
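Since these are plain TypedDicts, instrumentation payloads are ordinary dicts. A minimal sketch (not part of the diff) of building an input conversation in the new format:

from pydantic_ai import _otel_messages

messages: _otel_messages.InputMessages = [
    _otel_messages.ChatMessage(
        role='user',
        parts=[_otel_messages.TextPart(type='text', content='What is the capital of France?')],
    )
]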
pydantic_ai/builtin_tools.py CHANGED

@@ -29,6 +29,7 @@ class WebSearchTool(AbstractBuiltinTool):
      * Anthropic
      * OpenAI
      * Groq
+     * Google
      """

      search_context_size: Literal['low', 'medium', 'high'] = 'medium'
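WebSearchTool is now also supported with Google models; a hedged sketch (model name illustrative):

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import WebSearchTool

agent = Agent('google-gla:gemini-2.5-flash', builtin_tools=[WebSearchTool()])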
pydantic_ai/durable_exec/temporal/_model.py CHANGED

@@ -55,6 +55,10 @@ class TemporalStreamedResponse(StreamedResponse):
      def model_name(self) -> str:
          return self.response.model_name or ''  # pragma: no cover

+     @property
+     def provider_name(self) -> str:
+         return self.response.provider_name or ''  # pragma: no cover
+
      @property
      def timestamp(self) -> datetime:
          return self.response.timestamp  # pragma: no cover
pydantic_ai/messages.py CHANGED
@@ -10,10 +10,11 @@ from typing import TYPE_CHECKING, Annotated, Any, Literal, Union, cast, overload

  import pydantic
  import pydantic_core
+ from genai_prices import calc_price, types as genai_types
  from opentelemetry._events import Event  # pyright: ignore[reportPrivateImportUsage]
  from typing_extensions import TypeAlias, deprecated

- from . import _utils
+ from . import _otel_messages, _utils
  from ._utils import (
      generate_tool_call_id as _generate_tool_call_id,
      now_utc as _now_utc,
@@ -82,6 +83,9 @@ class SystemPromptPart:
              body={'role': 'system', **({'content': self.content} if settings.include_content else {})},
          )

+     def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+         return [_otel_messages.TextPart(type='text', **{'content': self.content} if settings.include_content else {})]
+
      __repr__ = _utils.dataclasses_no_defaults_repr


@@ -504,25 +508,41 @@ class UserPromptPart:
      """Part type identifier, this is available on all parts as a discriminator."""

      def otel_event(self, settings: InstrumentationSettings) -> Event:
-         content: str | list[dict[str, Any] | str] | dict[str, Any]
-         if isinstance(self.content, str):
-             content = self.content if settings.include_content else {'kind': 'text'}
-         else:
-             content = []
-             for part in self.content:
-                 if isinstance(part, str):
-                     content.append(part if settings.include_content else {'kind': 'text'})
-                 elif isinstance(part, (ImageUrl, AudioUrl, DocumentUrl, VideoUrl)):
-                     content.append({'kind': part.kind, **({'url': part.url} if settings.include_content else {})})
-                 elif isinstance(part, BinaryContent):
-                     converted_part = {'kind': part.kind, 'media_type': part.media_type}
-                     if settings.include_content and settings.include_binary_content:
-                         converted_part['binary_content'] = base64.b64encode(part.data).decode()
-                     content.append(converted_part)
-                 else:
-                     content.append({'kind': part.kind})  # pragma: no cover
+         content = [{'kind': part.pop('type'), **part} for part in self.otel_message_parts(settings)]
+         for part in content:
+             if part['kind'] == 'binary' and 'content' in part:
+                 part['binary_content'] = part.pop('content')
+         content = [
+             part['content'] if part == {'kind': 'text', 'content': part.get('content')} else part for part in content
+         ]
+         if content in ([{'kind': 'text'}], [self.content]):
+             content = content[0]
          return Event('gen_ai.user.message', body={'content': content, 'role': 'user'})

+     def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+         parts: list[_otel_messages.MessagePart] = []
+         content: Sequence[UserContent] = [self.content] if isinstance(self.content, str) else self.content
+         for part in content:
+             if isinstance(part, str):
+                 parts.append(
+                     _otel_messages.TextPart(type='text', **({'content': part} if settings.include_content else {}))
+                 )
+             elif isinstance(part, (ImageUrl, AudioUrl, DocumentUrl, VideoUrl)):
+                 parts.append(
+                     _otel_messages.MediaUrlPart(
+                         type=part.kind,
+                         **{'url': part.url} if settings.include_content else {},
+                     )
+                 )
+             elif isinstance(part, BinaryContent):
+                 converted_part = _otel_messages.BinaryDataPart(type='binary', media_type=part.media_type)
+                 if settings.include_content and settings.include_binary_content:
+                     converted_part['content'] = base64.b64encode(part.data).decode()
+                 parts.append(converted_part)
+             else:
+                 parts.append({'type': part.kind})  # pragma: no cover
+         return parts
+
      __repr__ = _utils.dataclasses_no_defaults_repr

@@ -576,6 +596,18 @@ class BaseToolReturnPart:
              },
          )

+     def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+         from .models.instrumented import InstrumentedModel
+
+         return [
+             _otel_messages.ToolCallResponsePart(
+                 type='tool_call_response',
+                 id=self.tool_call_id,
+                 name=self.tool_name,
+                 **({'result': InstrumentedModel.serialize_any(self.content)} if settings.include_content else {}),
+             )
+         ]
+
      def has_content(self) -> bool:
          """Return `True` if the tool return has content."""
          return self.content is not None  # pragma: no cover
@@ -669,6 +701,19 @@ class RetryPromptPart:
              },
          )

+     def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+         if self.tool_name is None:
+             return [_otel_messages.TextPart(type='text', content=self.model_response())]
+         else:
+             return [
+                 _otel_messages.ToolCallResponsePart(
+                     type='tool_call_response',
+                     id=self.tool_call_id,
+                     name=self.tool_name,
+                     **({'result': self.model_response()} if settings.include_content else {}),
+                 )
+             ]
+
      __repr__ = _utils.dataclasses_no_defaults_repr

@@ -848,6 +893,9 @@ class ModelResponse:
      kind: Literal['response'] = 'response'
      """Message type identifier, this is available on all parts as a discriminator."""

+     provider_name: str | None = None
+     """The name of the LLM provider that generated the response."""
+
      provider_details: dict[str, Any] | None = field(default=None)
      """Additional provider-specific details in a serializable format.

@@ -858,6 +906,19 @@ class ModelResponse:
      provider_request_id: str | None = None
      """request ID as specified by the model provider. This can be used to track the specific request to the model."""

+     def price(self) -> genai_types.PriceCalculation:
+         """Calculate the price of the usage.
+
+         Uses [`genai-prices`](https://github.com/pydantic/genai-prices).
+         """
+         assert self.model_name, 'Model name is required to calculate price'
+         return calc_price(
+             self.usage,
+             self.model_name,
+             provider_id=self.provider_name,
+             genai_request_timestamp=self.timestamp,
+         )
+
      def otel_events(self, settings: InstrumentationSettings) -> list[Event]:
          """Return OpenTelemetry events for the response."""
          result: list[Event] = []
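A rough usage sketch for the new method (not from the diff; the printed fields follow the genai-prices documentation and are assumptions here):

from pydantic_ai.messages import ModelResponse

def log_cost(response: ModelResponse) -> None:
    # price() asserts model_name is set; provider_name narrows provider matching
    calculation = response.price()
    print(calculation.input_price, calculation.output_price, calculation.total_price)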
@@ -894,6 +955,36 @@ class ModelResponse:

          return result

+     def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+         parts: list[_otel_messages.MessagePart] = []
+         for part in self.parts:
+             if isinstance(part, TextPart):
+                 parts.append(
+                     _otel_messages.TextPart(
+                         type='text',
+                         **({'content': part.content} if settings.include_content else {}),
+                     )
+                 )
+             elif isinstance(part, ThinkingPart):
+                 parts.append(
+                     _otel_messages.ThinkingPart(
+                         type='thinking',
+                         **({'content': part.content} if settings.include_content else {}),
+                     )
+                 )
+             elif isinstance(part, ToolCallPart):
+                 call_part = _otel_messages.ToolCallPart(type='tool_call', id=part.tool_call_id, name=part.tool_name)
+                 if settings.include_content and part.args is not None:
+                     from .models.instrumented import InstrumentedModel
+
+                     if isinstance(part.args, str):
+                         call_part['arguments'] = part.args
+                     else:
+                         call_part['arguments'] = {k: InstrumentedModel.serialize_any(v) for k, v in part.args.items()}
+
+                 parts.append(call_part)
+         return parts
+
      @property
      @deprecated('`vendor_details` is deprecated, use `provider_details` instead')
      def vendor_details(self) -> dict[str, Any] | None:
pydantic_ai/models/__init__.py CHANGED

@@ -598,6 +598,7 @@ class StreamedResponse(ABC):
              model_name=self.model_name,
              timestamp=self.timestamp,
              usage=self.usage(),
+             provider_name=self.provider_name,
          )

      def usage(self) -> RequestUsage:
@@ -610,6 +611,12 @@ class StreamedResponse(ABC):
          """Get the model name of the response."""
          raise NotImplementedError()

+     @property
+     @abstractmethod
+     def provider_name(self) -> str | None:
+         """Get the provider name."""
+         raise NotImplementedError()
+
      @property
      @abstractmethod
      def timestamp(self) -> datetime:
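Because provider_name is a new abstract property on StreamedResponse, custom streamed-response implementations must now define it. A minimal sketch (class name hypothetical; the other abstract members are elided):

from pydantic_ai.models import StreamedResponse

class MyStreamedResponse(StreamedResponse):
    # ... _get_event_iterator, model_name and timestamp as before ...

    @property
    def provider_name(self) -> str | None:
        """Return the provider name, or None when it is unknown."""
        return 'my-provider'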
pydantic_ai/models/anthropic.py CHANGED

@@ -37,12 +37,13 @@ from ..messages import (
  )
  from ..profiles import ModelProfileSpec
  from ..providers import Provider, infer_provider
+ from ..providers.anthropic import AsyncAnthropicClient
  from ..settings import ModelSettings
  from ..tools import ToolDefinition
  from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests, download_item, get_user_agent

  try:
-     from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic, AsyncStream
+     from anthropic import NOT_GIVEN, APIStatusError, AsyncStream
      from anthropic.types.beta import (
          BetaBase64PDFBlockParam,
          BetaBase64PDFSourceParam,
@@ -134,16 +135,16 @@ class AnthropicModel(Model):
      Apart from `__init__`, all methods are private or match those of the base class.
      """

-     client: AsyncAnthropic = field(repr=False)
+     client: AsyncAnthropicClient = field(repr=False)

      _model_name: AnthropicModelName = field(repr=False)
-     _provider: Provider[AsyncAnthropic] = field(repr=False)
+     _provider: Provider[AsyncAnthropicClient] = field(repr=False)

      def __init__(
          self,
          model_name: AnthropicModelName,
          *,
-         provider: Literal['anthropic'] | Provider[AsyncAnthropic] = 'anthropic',
+         provider: Literal['anthropic'] | Provider[AsyncAnthropicClient] = 'anthropic',
          profile: ModelProfileSpec | None = None,
          settings: ModelSettings | None = None,
      ):
@@ -153,7 +154,7 @@ class AnthropicModel(Model):
          model_name: The name of the Anthropic model to use. List of model names available
              [here](https://docs.anthropic.com/en/docs/about-claude/models).
          provider: The provider to use for the Anthropic API. Can be either the string 'anthropic' or an
-             instance of `Provider[AsyncAnthropic]`. If not provided, the other parameters will be used.
+             instance of `Provider[AsyncAnthropicClient]`. If not provided, the other parameters will be used.
          profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
          settings: Default model settings for this model instance.
          """
@@ -326,7 +327,11 @@ class AnthropicModel(Model):
          )

          return ModelResponse(
-             items, usage=_map_usage(response), model_name=response.model, provider_request_id=response.id
+             items,
+             usage=_map_usage(response),
+             model_name=response.model,
+             provider_request_id=response.id,
+             provider_name=self._provider.name,
          )

      async def _process_streamed_response(
@@ -344,6 +349,7 @@ class AnthropicModel(Model):
              _model_name=self._model_name,
              _response=peekable_response,
              _timestamp=timestamp,
+             _provider_name=self._provider.name,
          )

      def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[BetaToolParam]:
@@ -574,6 +580,7 @@ class AnthropicStreamedResponse(StreamedResponse):
      _model_name: AnthropicModelName
      _response: AsyncIterable[BetaRawMessageStreamEvent]
      _timestamp: datetime
+     _provider_name: str

      async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
          current_block: BetaContentBlock | None = None
@@ -655,6 +662,11 @@ class AnthropicStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
pydantic_ai/models/bedrock.py CHANGED

@@ -240,10 +240,7 @@ class BedrockConverseModel(Model):

      @staticmethod
      def _map_tool_definition(f: ToolDefinition) -> ToolTypeDef:
-         tool_spec: ToolSpecificationTypeDef = {
-             'name': f.name,
-             'inputSchema': {'json': f.parameters_json_schema},
-         }
+         tool_spec: ToolSpecificationTypeDef = {'name': f.name, 'inputSchema': {'json': f.parameters_json_schema}}

          if f.description:  # pragma: no branch
              tool_spec['description'] = f.description
@@ -275,6 +272,7 @@ class BedrockConverseModel(Model):
              model_request_parameters=model_request_parameters,
              _model_name=self.model_name,
              _event_stream=response,
+             _provider_name=self._provider.name,
          )

      async def _process_response(self, response: ConverseResponseTypeDef) -> ModelResponse:
@@ -304,7 +302,9 @@ class BedrockConverseModel(Model):
              output_tokens=response['usage']['outputTokens'],
          )
          vendor_id = response.get('ResponseMetadata', {}).get('RequestId', None)
-         return ModelResponse(items, usage=u, model_name=self.model_name, provider_request_id=vendor_id)
+         return ModelResponse(
+             items, usage=u, model_name=self.model_name, provider_request_id=vendor_id, provider_name=self._provider.name
+         )

      @overload
      async def _messages_create(
@@ -594,6 +594,7 @@ class BedrockStreamedResponse(StreamedResponse):

      _model_name: BedrockModelName
      _event_stream: EventStream[ConverseStreamOutputTypeDef]
+     _provider_name: str
      _timestamp: datetime = field(default_factory=_utils.now_utc)

      async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
@@ -660,15 +661,20 @@ class BedrockStreamedResponse(StreamedResponse):
              if maybe_event:  # pragma: no branch
                  yield maybe_event

-     @property
-     def timestamp(self) -> datetime:
-         return self._timestamp
-
      @property
      def model_name(self) -> str:
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
+     @property
+     def timestamp(self) -> datetime:
+         return self._timestamp
+
      def _map_usage(self, metadata: ConverseStreamMetadataEventTypeDef) -> usage.RequestUsage:
          return usage.RequestUsage(
              input_tokens=metadata['usage']['inputTokens'],
pydantic_ai/models/cohere.py CHANGED

@@ -205,7 +205,9 @@ class CohereModel(Model):
                      tool_call_id=c.id or _generate_tool_call_id(),
                  )
              )
-         return ModelResponse(parts=parts, usage=_map_usage(response), model_name=self._model_name)
+         return ModelResponse(
+             parts=parts, usage=_map_usage(response), model_name=self._model_name, provider_name=self._provider.name
+         )

      def _map_messages(self, messages: list[ModelMessage]) -> list[ChatMessageV2]:
          """Just maps a `pydantic_ai.Message` to a `cohere.ChatMessageV2`."""
pydantic_ai/models/function.py CHANGED

@@ -304,6 +304,11 @@ class FunctionStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> None:
+         """Get the provider name."""
+         return None
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
pydantic_ai/models/gemini.py CHANGED

@@ -305,6 +305,7 @@ class GeminiModel(Model):
              _model_name=self._model_name,
              _content=content,
              _stream=aiter_bytes,
+             _provider_name=self._provider.name,
          )

      async def _message_to_gemini_content(
@@ -425,6 +426,7 @@ class GeminiStreamedResponse(StreamedResponse):
      _model_name: GeminiModelName
      _content: bytearray
      _stream: AsyncIterator[bytes]
+     _provider_name: str
      _timestamp: datetime = field(default_factory=_utils.now_utc, init=False)

      async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
@@ -495,6 +497,11 @@ class GeminiStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
@@ -883,7 +890,7 @@ def _metadata_as_usage(response: _GeminiResponse) -> usage.RequestUsage:

      return usage.RequestUsage(
          input_tokens=metadata.get('prompt_token_count', 0),
-         output_tokens=metadata.get('candidates_token_count', 0),
+         output_tokens=metadata.get('candidates_token_count', 0) + thoughts_token_count,
          cache_read_tokens=cached_content_token_count,
          input_audio_tokens=input_audio_tokens,
          output_audio_tokens=output_audio_tokens,
pydantic_ai/models/google.py CHANGED

@@ -395,6 +395,7 @@ class GoogleModel(Model):
          return _process_response_from_parts(
              parts,
              response.model_version or self._model_name,
+             self._provider.name,
              usage,
              vendor_id=vendor_id,
              vendor_details=vendor_details,
@@ -414,6 +415,7 @@ class GoogleModel(Model):
              _model_name=self._model_name,
              _response=peekable_response,
              _timestamp=first_chunk.create_time or _utils.now_utc(),
+             _provider_name=self._provider.name,
          )

      async def _map_messages(self, messages: list[ModelMessage]) -> tuple[ContentDict | None, list[ContentUnionDict]]:
@@ -523,6 +525,7 @@ class GeminiStreamedResponse(StreamedResponse):
      _model_name: GoogleModelName
      _response: AsyncIterator[GenerateContentResponse]
      _timestamp: datetime
+     _provider_name: str

      async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
          async for chunk in self._response:
@@ -531,7 +534,10 @@ class GeminiStreamedResponse(StreamedResponse):
              assert chunk.candidates is not None
              candidate = chunk.candidates[0]
              if candidate.content is None or candidate.content.parts is None:
-                 if candidate.finish_reason == 'SAFETY':  # pragma: no cover
+                 if candidate.finish_reason == 'STOP':  # pragma: no cover
+                     # Normal completion - skip this chunk
+                     continue
+                 elif candidate.finish_reason == 'SAFETY':  # pragma: no cover
                      raise UnexpectedModelBehavior('Safety settings triggered', str(chunk))
                  else:  # pragma: no cover
                      raise UnexpectedModelBehavior('Content field missing from streaming Gemini response', str(chunk))
@@ -561,6 +567,11 @@ class GeminiStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
@@ -596,6 +607,7 @@ def _content_model_response(m: ModelResponse) -> ContentDict:
  def _process_response_from_parts(
      parts: list[Part],
      model_name: GoogleModelName,
+     provider_name: str,
      usage: usage.RequestUsage,
      vendor_id: str | None,
      vendor_details: dict[str, Any] | None = None,
@@ -633,7 +645,12 @@ def _process_response_from_parts(
                  f'Unsupported response from Gemini, expected all parts to be function calls or text, got: {part!r}'
              )
      return ModelResponse(
-         parts=items, model_name=model_name, usage=usage, provider_request_id=vendor_id, provider_details=vendor_details
+         parts=items,
+         model_name=model_name,
+         usage=usage,
+         provider_request_id=vendor_id,
+         provider_details=vendor_details,
+         provider_name=provider_name,
      )


@@ -661,7 +678,7 @@ def _metadata_as_usage(response: GenerateContentResponse) -> usage.RequestUsage:
      if cached_content_token_count := metadata.cached_content_token_count:
          details['cached_content_tokens'] = cached_content_token_count

-     if thoughts_token_count := metadata.thoughts_token_count:
+     if thoughts_token_count := (metadata.thoughts_token_count or 0):
          details['thoughts_tokens'] = thoughts_token_count

      if tool_use_prompt_token_count := metadata.tool_use_prompt_token_count:
@@ -694,7 +711,7 @@ def _metadata_as_usage(response: GenerateContentResponse) -> usage.RequestUsage:

      return usage.RequestUsage(
          input_tokens=metadata.prompt_token_count or 0,
-         output_tokens=metadata.candidates_token_count or 0,
+         output_tokens=(metadata.candidates_token_count or 0) + thoughts_token_count,
          cache_read_tokens=cached_content_token_count or 0,
          input_audio_tokens=input_audio_tokens,
          output_audio_tokens=output_audio_tokens,
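The net effect on Google/Gemini usage is that reasoning tokens are now counted in output_tokens. A worked example with hypothetical values:

candidates_token_count = 100  # visible completion tokens
thoughts_token_count = 40     # reasoning tokens, now defaulted to 0 when missing
output_tokens = (candidates_token_count or 0) + thoughts_token_count  # 140; previously 100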
pydantic_ai/models/groq.py CHANGED

@@ -290,6 +290,7 @@ class GroqModel(Model):
              model_name=response.model,
              timestamp=timestamp,
              provider_request_id=response.id,
+             provider_name=self._provider.name,
          )

      async def _process_streamed_response(
@@ -309,6 +310,7 @@ class GroqModel(Model):
              _model_name=self._model_name,
              _model_profile=self.profile,
              _timestamp=number_to_datetime(first_chunk.created),
+             _provider_name=self._provider.name,
          )

      def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[chat.ChatCompletionToolParam]:
@@ -444,6 +446,7 @@ class GroqStreamedResponse(StreamedResponse):
      _model_profile: ModelProfile
      _response: AsyncIterable[chat.ChatCompletionChunk]
      _timestamp: datetime
+     _provider_name: str

      async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
          async for chunk in self._response:
@@ -482,6 +485,11 @@ class GroqStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
pydantic_ai/models/huggingface.py CHANGED

@@ -272,6 +272,7 @@ class HuggingFaceModel(Model):
              model_name=response.model,
              timestamp=timestamp,
              provider_request_id=response.id,
+             provider_name=self._provider.name,
          )

      async def _process_streamed_response(
@@ -291,6 +292,7 @@ class HuggingFaceModel(Model):
              _model_profile=self.profile,
              _response=peekable_response,
              _timestamp=datetime.fromtimestamp(first_chunk.created, tz=timezone.utc),
+             _provider_name=self._provider.name,
          )

      def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ChatCompletionInputTool]:
@@ -437,6 +439,7 @@ class HuggingFaceStreamedResponse(StreamedResponse):
      _model_profile: ModelProfile
      _response: AsyncIterable[ChatCompletionStreamOutput]
      _timestamp: datetime
+     _provider_name: str

      async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
          async for chunk in self._response:
@@ -474,6 +477,11 @@ class HuggingFaceStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
pydantic_ai/models/instrumented.py CHANGED

@@ -1,10 +1,11 @@
  from __future__ import annotations

+ import itertools
  import json
  from collections.abc import AsyncIterator, Iterator, Mapping
  from contextlib import asynccontextmanager, contextmanager
  from dataclasses import dataclass, field
- from typing import Any, Callable, Literal
+ from typing import Any, Callable, Literal, cast
  from urllib.parse import urlparse

  from opentelemetry._events import (
@@ -18,8 +19,14 @@ from opentelemetry.trace import Span, Tracer, TracerProvider, get_tracer_provide
  from opentelemetry.util.types import AttributeValue
  from pydantic import TypeAdapter

+ from .. import _otel_messages
  from .._run_context import RunContext
- from ..messages import ModelMessage, ModelRequest, ModelResponse
+ from ..messages import (
+     ModelMessage,
+     ModelRequest,
+     ModelResponse,
+     SystemPromptPart,
+ )
  from ..settings import ModelSettings
  from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse
  from .wrapper import WrapperModel
@@ -80,6 +87,8 @@ class InstrumentationSettings:
      event_logger: EventLogger = field(repr=False)
      event_mode: Literal['attributes', 'logs'] = 'attributes'
      include_binary_content: bool = True
+     include_content: bool = True
+     version: Literal[1, 2] = 1

      def __init__(
          self,
@@ -90,6 +99,7 @@ class InstrumentationSettings:
          event_logger_provider: EventLoggerProvider | None = None,
          include_binary_content: bool = True,
          include_content: bool = True,
+         version: Literal[1, 2] = 1,
      ):
          """Create instrumentation options.

@@ -109,6 +119,10 @@ class InstrumentationSettings:
              include_binary_content: Whether to include binary content in the instrumentation events.
              include_content: Whether to include prompts, completions, and tool call arguments and responses
                  in the instrumentation events.
+             version: Version of the data format.
+                 Version 1 is based on the legacy event-based OpenTelemetry GenAI spec.
+                 Version 2 stores messages in the attributes `gen_ai.input.messages` and `gen_ai.output.messages`.
+                 Version 2 is still WIP and experimental, but will become the default in Pydantic AI v1.
          """
          from pydantic_ai import __version__

@@ -122,6 +136,7 @@ class InstrumentationSettings:
          self.event_mode = event_mode
          self.include_binary_content = include_binary_content
          self.include_content = include_content
+         self.version = version

      # As specified in the OpenTelemetry GenAI metrics spec:
      # https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-metrics/#metric-gen_aiclienttokenusage
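To opt in to the experimental v2 format, pass the new parameter when configuring instrumentation; a sketch (assuming the usual Agent.instrument_all entry point):

from pydantic_ai import Agent
from pydantic_ai.models.instrumented import InstrumentationSettings

Agent.instrument_all(InstrumentationSettings(version=2))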
@@ -179,6 +194,90 @@ class InstrumentationSettings:
              event.body = InstrumentedModel.serialize_any(event.body)
          return events

+     def messages_to_otel_messages(self, messages: list[ModelMessage]) -> list[_otel_messages.ChatMessage]:
+         result: list[_otel_messages.ChatMessage] = []
+         for message in messages:
+             if isinstance(message, ModelRequest):
+                 for is_system, group in itertools.groupby(message.parts, key=lambda p: isinstance(p, SystemPromptPart)):
+                     message_parts: list[_otel_messages.MessagePart] = []
+                     for part in group:
+                         if hasattr(part, 'otel_message_parts'):
+                             message_parts.extend(part.otel_message_parts(self))
+                     result.append(
+                         _otel_messages.ChatMessage(role='system' if is_system else 'user', parts=message_parts)
+                     )
+             elif isinstance(message, ModelResponse):  # pragma: no branch
+                 result.append(_otel_messages.ChatMessage(role='assistant', parts=message.otel_message_parts(self)))
+         return result
+
+     def handle_messages(self, input_messages: list[ModelMessage], response: ModelResponse, system: str, span: Span):
+         if self.version == 1:
+             events = self.messages_to_otel_events(input_messages)
+             for event in self.messages_to_otel_events([response]):
+                 events.append(
+                     Event(
+                         'gen_ai.choice',
+                         body={
+                             'index': 0,
+                             'message': event.body,
+                         },
+                     )
+                 )
+             for event in events:
+                 event.attributes = {
+                     GEN_AI_SYSTEM_ATTRIBUTE: system,
+                     **(event.attributes or {}),
+                 }
+             self._emit_events(span, events)
+         else:
+             output_messages = self.messages_to_otel_messages([response])
+             assert len(output_messages) == 1
+             output_message = cast(_otel_messages.OutputMessage, output_messages[0])
+             if response.provider_details and 'finish_reason' in response.provider_details:
+                 output_message['finish_reason'] = response.provider_details['finish_reason']
+             instructions = InstrumentedModel._get_instructions(input_messages)  # pyright: ignore [reportPrivateUsage]
+             attributes = {
+                 'gen_ai.input.messages': json.dumps(self.messages_to_otel_messages(input_messages)),
+                 'gen_ai.output.messages': json.dumps([output_message]),
+                 'logfire.json_schema': json.dumps(
+                     {
+                         'type': 'object',
+                         'properties': {
+                             'gen_ai.input.messages': {'type': 'array'},
+                             'gen_ai.output.messages': {'type': 'array'},
+                             **({'gen_ai.system_instructions': {'type': 'array'}} if instructions else {}),
+                             'model_request_parameters': {'type': 'object'},
+                         },
+                     }
+                 ),
+             }
+             if instructions is not None:
+                 attributes['gen_ai.system_instructions'] = json.dumps(
+                     [_otel_messages.TextPart(type='text', content=instructions)]
+                 )
+             span.set_attributes(attributes)
+
+     def _emit_events(self, span: Span, events: list[Event]) -> None:
+         if self.event_mode == 'logs':
+             for event in events:
+                 self.event_logger.emit(event)
+         else:
+             attr_name = 'events'
+             span.set_attributes(
+                 {
+                     attr_name: json.dumps([InstrumentedModel.event_to_dict(event) for event in events]),
+                     'logfire.json_schema': json.dumps(
+                         {
+                             'type': 'object',
+                             'properties': {
+                                 attr_name: {'type': 'array'},
+                                 'model_request_parameters': {'type': 'object'},
+                             },
+                         }
+                     ),
+                 }
+             )
+

  GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system'
  GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'
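Under version 2, a span ends up with JSON attributes shaped like the following (an illustrative sketch, values hypothetical):

# gen_ai.input.messages:
#   [{"role": "user", "parts": [{"type": "text", "content": "Hi"}]}]
# gen_ai.output.messages:
#   [{"role": "assistant", "parts": [{"type": "text", "content": "Hello!"}], "finish_reason": "stop"}]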
@@ -269,7 +368,7 @@ class InstrumentedModel(WrapperModel):
              # FallbackModel updates these span attributes.
              attributes.update(getattr(span, 'attributes', {}))
              request_model = attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]
-             system = attributes[GEN_AI_SYSTEM_ATTRIBUTE]
+             system = cast(str, attributes[GEN_AI_SYSTEM_ATTRIBUTE])

              response_model = response.model_name or request_model

@@ -297,18 +396,7 @@ class InstrumentedModel(WrapperModel):
              if not span.is_recording():
                  return

-             events = self.instrumentation_settings.messages_to_otel_events(messages)
-             for event in self.instrumentation_settings.messages_to_otel_events([response]):
-                 events.append(
-                     Event(
-                         'gen_ai.choice',
-                         body={
-                             # TODO finish_reason
-                             'index': 0,
-                             'message': event.body,
-                         },
-                     )
-                 )
+             self.instrumentation_settings.handle_messages(messages, response, system, span)
              span.set_attributes(
                  {
                      **response.usage.opentelemetry_attributes(),
@@ -316,12 +404,6 @@ class InstrumentedModel(WrapperModel):
                  }
              )
              span.update_name(f'{operation} {request_model}')
-             for event in events:
-                 event.attributes = {
-                     GEN_AI_SYSTEM_ATTRIBUTE: system,
-                     **(event.attributes or {}),
-                 }
-             self._emit_events(span, events)

          yield finish
      finally:
@@ -330,27 +412,6 @@ class InstrumentedModel(WrapperModel):
          # to prevent them from being redundantly recorded in the span itself by logfire.
          record_metrics()

-     def _emit_events(self, span: Span, events: list[Event]) -> None:
-         if self.instrumentation_settings.event_mode == 'logs':
-             for event in events:
-                 self.instrumentation_settings.event_logger.emit(event)
-         else:
-             attr_name = 'events'
-             span.set_attributes(
-                 {
-                     attr_name: json.dumps([self.event_to_dict(event) for event in events]),
-                     'logfire.json_schema': json.dumps(
-                         {
-                             'type': 'object',
-                             'properties': {
-                                 attr_name: {'type': 'array'},
-                                 'model_request_parameters': {'type': 'object'},
-                             },
-                         }
-                     ),
-                 }
-             )
-
      @staticmethod
      def model_attributes(model: Model):
          attributes: dict[str, AttributeValue] = {
pydantic_ai/models/mistral.py CHANGED

@@ -353,6 +353,7 @@ class MistralModel(Model):
              model_name=response.model,
              timestamp=timestamp,
              provider_request_id=response.id,
+             provider_name=self._provider.name,
          )

      async def _process_streamed_response(
@@ -378,6 +379,7 @@ class MistralModel(Model):
              _response=peekable_response,
              _model_name=self._model_name,
              _timestamp=timestamp,
+             _provider_name=self._provider.name,
          )

      @staticmethod
@@ -584,6 +586,7 @@ class MistralStreamedResponse(StreamedResponse):
      _model_name: MistralModelName
      _response: AsyncIterable[MistralCompletionEvent]
      _timestamp: datetime
+     _provider_name: str

      _delta_content: str = field(default='', init=False)

@@ -631,6 +634,11 @@ class MistralStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
pydantic_ai/models/openai.py CHANGED

@@ -500,6 +500,7 @@ class OpenAIModel(Model):
              timestamp=timestamp,
              provider_details=vendor_details,
              provider_request_id=response.id,
+             provider_name=self._provider.name,
          )

      async def _process_streamed_response(
@@ -519,6 +520,7 @@ class OpenAIModel(Model):
              _model_profile=self.profile,
              _response=peekable_response,
              _timestamp=number_to_datetime(first_chunk.created),
+             _provider_name=self._provider.name,
          )

      def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[chat.ChatCompletionToolParam]:
@@ -571,6 +573,8 @@ class OpenAIModel(Model):
                  # Note: model responses from this model should only have one text item, so the following
                  # shouldn't merge multiple texts into one unless you switch models between runs:
                  message_param['content'] = '\n\n'.join(texts)
+             else:
+                 message_param['content'] = None
              if tool_calls:
                  message_param['tool_calls'] = tool_calls
              openai_messages.append(message_param)
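Setting content to None when there are no text parts matches the OpenAI Chat Completions convention for assistant messages that only carry tool calls; the resulting message dict looks roughly like this (shape illustrative):

# {'role': 'assistant', 'content': None, 'tool_calls': [{'id': '...', 'type': 'function', 'function': {...}}]}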
@@ -803,6 +807,7 @@ class OpenAIResponsesModel(Model):
              model_name=response.model,
              provider_request_id=response.id,
              timestamp=timestamp,
+             provider_name=self._provider.name,
          )

      async def _process_streamed_response(
@@ -822,6 +827,7 @@ class OpenAIResponsesModel(Model):
              _model_name=self._model_name,
              _response=peekable_response,
              _timestamp=number_to_datetime(first_chunk.response.created_at),
+             _provider_name=self._provider.name,
          )

      @overload
@@ -1137,6 +1143,7 @@ class OpenAIStreamedResponse(StreamedResponse):
      _model_profile: ModelProfile
      _response: AsyncIterable[ChatCompletionChunk]
      _timestamp: datetime
+     _provider_name: str

      async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
          async for chunk in self._response:
@@ -1180,6 +1187,11 @@ class OpenAIStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
@@ -1193,6 +1205,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
      _model_name: OpenAIModelName
      _response: AsyncIterable[responses.ResponseStreamEvent]
      _timestamp: datetime
+     _provider_name: str

      async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
          async for chunk in self._response:
@@ -1313,6 +1326,11 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
pydantic_ai/models/test.py CHANGED

@@ -131,6 +131,7 @@ class TestModel(Model):
              _model_name=self._model_name,
              _structured_response=model_response,
              _messages=messages,
+             _provider_name=self._system,
          )

      @property
@@ -263,6 +264,7 @@ class TestStreamedResponse(StreamedResponse):
      _model_name: str
      _structured_response: ModelResponse
      _messages: InitVar[Iterable[ModelMessage]]
+     _provider_name: str
      _timestamp: datetime = field(default_factory=_utils.now_utc, init=False)

      def __post_init__(self, _messages: Iterable[ModelMessage]):
@@ -305,6 +307,11 @@ class TestStreamedResponse(StreamedResponse):
          """Get the model name of the response."""
          return self._model_name

+     @property
+     def provider_name(self) -> str:
+         """Get the provider name."""
+         return self._provider_name
+
      @property
      def timestamp(self) -> datetime:
          """Get the timestamp of the response."""
pydantic_ai/providers/anthropic.py CHANGED

@@ -1,9 +1,10 @@
  from __future__ import annotations as _annotations

  import os
- from typing import overload
+ from typing import Union, overload

  import httpx
+ from typing_extensions import TypeAlias

  from pydantic_ai.exceptions import UserError
  from pydantic_ai.models import cached_async_http_client
@@ -12,15 +13,18 @@ from pydantic_ai.profiles.anthropic import anthropic_model_profile
  from pydantic_ai.providers import Provider

  try:
-     from anthropic import AsyncAnthropic
- except ImportError as _import_error:  # pragma: no cover
+     from anthropic import AsyncAnthropic, AsyncAnthropicBedrock
+ except ImportError as _import_error:
      raise ImportError(
          'Please install the `anthropic` package to use the Anthropic provider, '
          'you can use the `anthropic` optional group — `pip install "pydantic-ai-slim[anthropic]"`'
      ) from _import_error


- class AnthropicProvider(Provider[AsyncAnthropic]):
+ AsyncAnthropicClient: TypeAlias = Union[AsyncAnthropic, AsyncAnthropicBedrock]
+
+
+ class AnthropicProvider(Provider[AsyncAnthropicClient]):
      """Provider for Anthropic API."""

      @property
@@ -32,14 +36,14 @@ class AnthropicProvider(Provider[AsyncAnthropic]):
          return str(self._client.base_url)

      @property
-     def client(self) -> AsyncAnthropic:
+     def client(self) -> AsyncAnthropicClient:
          return self._client

      def model_profile(self, model_name: str) -> ModelProfile | None:
          return anthropic_model_profile(model_name)

      @overload
-     def __init__(self, *, anthropic_client: AsyncAnthropic | None = None) -> None: ...
+     def __init__(self, *, anthropic_client: AsyncAnthropicClient | None = None) -> None: ...

      @overload
      def __init__(self, *, api_key: str | None = None, http_client: httpx.AsyncClient | None = None) -> None: ...
@@ -48,7 +52,7 @@ class AnthropicProvider(Provider[AsyncAnthropic]):
          self,
          *,
          api_key: str | None = None,
-         anthropic_client: AsyncAnthropic | None = None,
+         anthropic_client: AsyncAnthropicClient | None = None,
          http_client: httpx.AsyncClient | None = None,
      ) -> None:
          """Create a new Anthropic provider.
@@ -71,7 +75,6 @@ class AnthropicProvider(Provider[AsyncAnthropic]):
                  'Set the `ANTHROPIC_API_KEY` environment variable or pass it via `AnthropicProvider(api_key=...)`'
                  'to use the Anthropic provider.'
              )
-
          if http_client is not None:
              self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
          else:
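With the widened client type, a Bedrock-backed Anthropic client can now be injected directly; a sketch (model ID illustrative, credentials resolved by the anthropic SDK from the environment):

from anthropic import AsyncAnthropicBedrock

from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.providers.anthropic import AnthropicProvider

provider = AnthropicProvider(anthropic_client=AsyncAnthropicBedrock())
model = AnthropicModel('anthropic.claude-sonnet-4-20250514-v1:0', provider=provider)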
pydantic_ai_slim-0.7.5.dist-info/METADATA CHANGED

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-slim
- Version: 0.7.4
+ Version: 0.7.5
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
  Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
  Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -35,7 +35,7 @@ Requires-Dist: genai-prices>=0.0.22
  Requires-Dist: griffe>=1.3.2
  Requires-Dist: httpx>=0.27
  Requires-Dist: opentelemetry-api>=1.28.0
- Requires-Dist: pydantic-graph==0.7.4
+ Requires-Dist: pydantic-graph==0.7.5
  Requires-Dist: pydantic>=2.10
  Requires-Dist: typing-inspection>=0.4.0
  Provides-Extra: a2a
@@ -57,9 +57,9 @@ Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'c
  Provides-Extra: duckduckgo
  Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
  Provides-Extra: evals
- Requires-Dist: pydantic-evals==0.7.4; extra == 'evals'
+ Requires-Dist: pydantic-evals==0.7.5; extra == 'evals'
  Provides-Extra: google
- Requires-Dist: google-genai>=1.28.0; extra == 'google'
+ Requires-Dist: google-genai>=1.31.0; extra == 'google'
  Provides-Extra: groq
  Requires-Dist: groq>=0.25.0; extra == 'groq'
  Provides-Extra: huggingface
pydantic_ai_slim-0.7.5.dist-info/RECORD CHANGED

@@ -6,6 +6,7 @@ pydantic_ai/_cli.py,sha256=YNqV2Kb3KYpof0lD4wwnULM2sCxzsvBzsh-oPd6lJN8,13959
  pydantic_ai/_function_schema.py,sha256=YFHxb6bKfhgeY6rNdbuYXgndGCDanveUx2258xkSNlQ,11233
  pydantic_ai/_griffe.py,sha256=Ugft16ZHw9CN_6-lW0Svn6jESK9zHXO_x4utkGBkbBI,5253
  pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
+ pydantic_ai/_otel_messages.py,sha256=C_t8Jj0tlbYPGS_Wuf5eSw_7n3SwDsFfLQjLl9hM_bk,1480
  pydantic_ai/_output.py,sha256=6Vxlw8F9nRWCkjy4qvFF8tmDi2xZn7Dq72T6s4C5kAM,37640
  pydantic_ai/_parts_manager.py,sha256=zrra5yDpAX8cFB_eK0btAp9d6NAR979V1Rmepm83l1w,17980
  pydantic_ai/_run_context.py,sha256=uSjR0a9QJ7KifqJsu7buGpK4wqexO7ldZMzWsBOboDI,1697
@@ -14,12 +15,12 @@ pydantic_ai/_thinking_part.py,sha256=x80-Vkon16GOyq3W6f2qzafTVPC5dCgF7QD3k8ZMmYU
  pydantic_ai/_tool_manager.py,sha256=WPMXgHBzyn7UgRKIuqU-oV2GpsAOW0nF2RsxPCKOp7U,9655
  pydantic_ai/_utils.py,sha256=Ge9rtu8NJvsfSFjx1MduITPr0-9b_I0emDFSpwJbYes,16372
  pydantic_ai/ag_ui.py,sha256=bd-RJYFFmcw2xkAWj-7N0ZLRMa1dam3LEhlgoNo1l9I,26475
- pydantic_ai/builtin_tools.py,sha256=ipo0q5DISc99k2xD453Yaw61v-3tCmlD2mDval9UCRU,3181
+ pydantic_ai/builtin_tools.py,sha256=Fr9PF5RDdi5xQzKj7VJ8iDulbNgvF0yBdCC8E6F38Vo,3194
  pydantic_ai/direct.py,sha256=ym1Lb7oCPDU2eMmN8LXzN7Dp_IUJIPmlyKgmxxBUxsc,14905
  pydantic_ai/exceptions.py,sha256=vHRH_b6JpMi5p5EGhz2O4FSeKGJv3WMD291Y1FjHYFc,3528
  pydantic_ai/format_prompt.py,sha256=Or-Ytq55RQb1UJqy2HKIyPpZ-knWXfdDP3Z6tNc6Orw,4244
  pydantic_ai/mcp.py,sha256=n9_ECHmFE-eOZmb1bDh94oy81caefdtSGo1oH2KKWMo,31162
- pydantic_ai/messages.py,sha256=fPIicZ2J149x4cGSBbgeVahaDlnK6ChbMZ8gsZLPPfg,46099
+ pydantic_ai/messages.py,sha256=G65-bCxQt3EpfD-uAFs-Pz8HXFeIqe2V-kvOeXRsaOg,50131
  pydantic_ai/output.py,sha256=54Cwd1RruXlA5hucZ1h-SxFrzKHJuLvYvLtH9iyg2GI,11988
  pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pydantic_ai/result.py,sha256=gUVJIyoM1FewRFLj7ezuXr7M2RSaQL4AytoIpBMmDy8,19800
@@ -40,27 +41,27 @@ pydantic_ai/durable_exec/temporal/_agent.py,sha256=H1oHoZC9wOuAN79LG7Qb3DNtK2XFC
  pydantic_ai/durable_exec/temporal/_function_toolset.py,sha256=qKTs7T6YcJ2LBOvrlYBRUHd9zn86TuUaY0Qw5TPGSmE,3848
  pydantic_ai/durable_exec/temporal/_logfire.py,sha256=_dsvVIdpYJEUyoFUIDYQjCO6TjTITqvlJHwNy_Fi2cw,1866
  pydantic_ai/durable_exec/temporal/_mcp_server.py,sha256=J7CC4gRDhM4uQo3_kNKV7e4uiWC0He0AgS7bdq9Pt4o,6005
- pydantic_ai/durable_exec/temporal/_model.py,sha256=7_-f-n1JEg4YsJ8NGoB9RXpaAB-mWZZBR0wtGqa3NgE,6645
+ pydantic_ai/durable_exec/temporal/_model.py,sha256=hHanoDWEi6FgSvFJ7HUJaAzE0aEFAj0VZ2sb0wQ-3EY,6765
  pydantic_ai/durable_exec/temporal/_run_context.py,sha256=5NTomzWBAlFcXeVw-4mqa76Rmrc8b3G1bB5ElVsAyCY,2310
  pydantic_ai/durable_exec/temporal/_toolset.py,sha256=XlQx7NGSKR9n-mjNWaTn-i3HW9X4z5fZUAM9DwDTBwY,2865
  pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pydantic_ai/ext/aci.py,sha256=sUllKDNO-LOMurbFgxwRHuzNlBkSa3aVBqXfEm-A_vo,2545
  pydantic_ai/ext/langchain.py,sha256=iLVEZv1kcLkdIHo3us2yfdi0kVqyJ6qTaCt9BoLWm4k,2335
- pydantic_ai/models/__init__.py,sha256=h29lgKzOx8fHV0wXHMmaaawn8xGpAuFFxQwZJx4ihvU,34637
- pydantic_ai/models/anthropic.py,sha256=WO-PGqtXZaFV9Oz9aHr-ADMc2qSr8TVep4tPmhWL_DI,30122
- pydantic_ai/models/bedrock.py,sha256=z-qs8RikRxClbhDNsbRcuZ1GsPlb-GjoV88QD6vXOtE,30723
- pydantic_ai/models/cohere.py,sha256=jZPMuFvNv9JTVCbC_p7_1Uxuc5WGn6RVFpH_pwG7iO0,13009
+ pydantic_ai/models/__init__.py,sha256=tfMixYa06ByHDBo5A0NFNpMuwr8mXpiVgAp6JBBSYlU,34834
+ pydantic_ai/models/anthropic.py,sha256=2Nhu_m_W9EdsbeUe4aAV-Jcdv7hPOG4OuI6DmRjSi2o,30464
+ pydantic_ai/models/bedrock.py,sha256=8azpQUM4Z4haDM0kdrkUUcc88Jpu3ff6dybznyRJ20o,30940
+ pydantic_ai/models/cohere.py,sha256=WosAOwDbxpeVpIX4swmaxL5XgfcfHwkoekt4UqVeNUQ,13066
  pydantic_ai/models/fallback.py,sha256=ftcYhl0oSOer0vYbLkaf7dfgI_3h0hk7hp07i1Wh9rI,5526
- pydantic_ai/models/function.py,sha256=dY1aCUwcVXot5e2jWApWMAMnKWkSeEH4vqoe6mTBBZ0,14139
- pydantic_ai/models/gemini.py,sha256=2KoN-sf5skHOkVcvbV5Zw4jGYanZX4n_K4jhyJVYK4U,39151
- pydantic_ai/models/google.py,sha256=46q5NF8hSUHAMNcGbFNqZgK_S24klWb2aAFelMYy79s,30891
- pydantic_ai/models/groq.py,sha256=kqegrHbeabp__jNwuib3k0vQTVpra7Gy3D_dwbH9vv8,20960
- pydantic_ai/models/huggingface.py,sha256=JcYDfZbkYsQ9KUlDwfraiWq7dbg8Q0FRKkx8Lv9IWjg,20182
- pydantic_ai/models/instrumented.py,sha256=wzdnuIIIgpMJ7zqhMf-S--ffg9YYEgn5WRkJE2cCwao,16307
+ pydantic_ai/models/function.py,sha256=jSr55QtW1fFxoXNK3EgrmuCcaG0U6uh6q56q8hrI_To,14248
+ pydantic_ai/models/gemini.py,sha256=GVSJWhB5Mlkb2uvaq_4BYYRLY59EgP-geCOgrtjbnUI,39369
+ pydantic_ai/models/google.py,sha256=rF3x1FpXlmHy53PmNbRuuJJ9ozTnWGGMSo1MGJPw3m8,31408
+ pydantic_ai/models/groq.py,sha256=n0V_8ZFeDK7sDk6gc_cAzsaoxjydUq46SzyvKM-gzoM,21202
+ pydantic_ai/models/huggingface.py,sha256=7yxCiLHJe_M3S50W6VaRREyHbXn5DTm6558eKZUHiQE,20424
+ pydantic_ai/models/instrumented.py,sha256=2RX7aSqtU01NAXt2-0TWeHIDNNmT_KCHd-8qG7bgaN8,19411
  pydantic_ai/models/mcp_sampling.py,sha256=iXIjwTP5Jszn0Iz-5MZiCk_BdUYE7IAxCnC5yIrGIu0,3436
- pydantic_ai/models/mistral.py,sha256=jNkzlEgqD1n0Fb3Yy9CFnlQ-Oi7pe7ixhXokqfrQFqg,32208
- pydantic_ai/models/openai.py,sha256=B6a2zDskBJITszwwmjncQLw4g9qAgZc5HgnlVhtGr7A,62939
- pydantic_ai/models/test.py,sha256=7J9bZOBwqQ3X0f5PT0_JakMPSB5UmIZ5ZFlX8hH45NU,19089
+ pydantic_ai/models/mistral.py,sha256=5i_zl_pzo--Dll5OCT7vwCQmvvUt9cI5d19_NVHDRnM,32450
+ pydantic_ai/models/openai.py,sha256=Y1OCpLL_QHOHpIs102Bp0aLB93UPbkUF_q6AMBzDi6Q,63497
+ pydantic_ai/models/test.py,sha256=7D1l21iI5Kwp9Stdhdd-M8V_1PeofaffETKvB_SHV3Q,19277
  pydantic_ai/models/wrapper.py,sha256=9MeHW7mXPsEK03IKL0rtjeX6QgXyZROOOzLh72GiX2k,2148
  pydantic_ai/profiles/__init__.py,sha256=AdwFrK50_20qJBA_eMXXsV1vdGOvPxLVW82hMQvzXU0,3285
  pydantic_ai/profiles/_json_schema.py,sha256=CthOGmPSjgEZRRglfvg31zyQ9vjHDdacXoFpmba93dE,7206
@@ -77,7 +78,7 @@ pydantic_ai/profiles/moonshotai.py,sha256=e1RJnbEvazE6aJAqfmYLYGNtwNwg52XQDRDkcL
  pydantic_ai/profiles/openai.py,sha256=lSPM1tDiWVxQH45YzD_GjYsnkBzLankRFu5JRGfx81I,8112
  pydantic_ai/profiles/qwen.py,sha256=K4_nJ_oN5NS_9W0Fl-dFgC4emVRTHPXFTtiJ_nycKHo,373
  pydantic_ai/providers/__init__.py,sha256=-jb9Vl4gE7z0katqwLPaKt5UileuPp0Brq0ZueBVJys,4246
- pydantic_ai/providers/anthropic.py,sha256=D35UXxCPXv8yIbD0fj9Zg2FvNyoMoJMeDUtVM8Sn78I,3046
+ pydantic_ai/providers/anthropic.py,sha256=i9Y7hAHWPTcPUsmRSxtD7QtNrEk5n3uvEtCkYWyipJ4,3200
  pydantic_ai/providers/azure.py,sha256=y77IHGiSQ9Ttx9f4SGMgdpin2Daq6eYyzUdM9ET22RQ,5819
  pydantic_ai/providers/bedrock.py,sha256=8jz77ySKv6CzCktN9YbZb1736gZM0d_btcKvXiZSSHI,5784
  pydantic_ai/providers/cohere.py,sha256=LT6QaLPJBBlFUgYgXQOfKpbM9SXLzorWFxI7jNfOX_4,2892
@@ -109,8 +110,8 @@ pydantic_ai/toolsets/prefixed.py,sha256=0KwcDkW8OM36ZUsOLVP5h-Nj2tPq78L3_E2c-1Fb
  pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
  pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
  pydantic_ai/toolsets/wrapper.py,sha256=mMuMPdko9PJUdcsexlRXbwViSwKKJfv6JE58d8HK3ds,1646
- pydantic_ai_slim-0.7.4.dist-info/METADATA,sha256=_fszjpgjDIP3jR56uyk7fD4f2-mlZcqtupc1P2pyv0E,4661
- pydantic_ai_slim-0.7.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- pydantic_ai_slim-0.7.4.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
- pydantic_ai_slim-0.7.4.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
- pydantic_ai_slim-0.7.4.dist-info/RECORD,,
+ pydantic_ai_slim-0.7.5.dist-info/METADATA,sha256=UK0PS8ex-RsTV379rsLtBL9vifFciMESIoTLgjbrbec,4661
+ pydantic_ai_slim-0.7.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ pydantic_ai_slim-0.7.5.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+ pydantic_ai_slim-0.7.5.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+ pydantic_ai_slim-0.7.5.dist-info/RECORD,,