pydantic-ai-slim 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

pydantic_ai/_agent_graph.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations as _annotations
 
 import asyncio
 import dataclasses
+import hashlib
 from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
 from contextlib import asynccontextmanager, contextmanager
 from contextvars import ContextVar
@@ -92,6 +93,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
 
     function_tools: dict[str, Tool[DepsT]] = dataclasses.field(repr=False)
     mcp_servers: Sequence[MCPServer] = dataclasses.field(repr=False)
+    default_retries: int
 
     tracer: Tracer
 
@@ -546,6 +548,13 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT
     )
 
 
+def multi_modal_content_identifier(identifier: str | bytes) -> str:
+    """Generate stable identifier for multi-modal content to help LLM in finding a specific file in tool call responses."""
+    if isinstance(identifier, str):
+        identifier = identifier.encode('utf-8')
+    return hashlib.sha1(identifier).hexdigest()[:6]
+
+
 async def process_function_tools(  # noqa C901
     tool_calls: list[_messages.ToolCallPart],
     output_tool_name: str | None,
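For reference, a self-contained copy of the new helper with a tiny demo. The point is determinism: equal inputs always produce the same 6-character tag, so the model can refer to "file <tag>" consistently across turns (the printed value is whatever SHA-1 yields):

from __future__ import annotations

import hashlib


def multi_modal_content_identifier(identifier: str | bytes) -> str:
    # Normalize str to bytes, then take a short SHA-1 prefix
    if isinstance(identifier, str):
        identifier = identifier.encode('utf-8')
    return hashlib.sha1(identifier).hexdigest()[:6]


# Equal inputs map to equal tags, unlike the old run-scoped counter
assert multi_modal_content_identifier('https://example.com/a.png') == \
    multi_modal_content_identifier('https://example.com/a.png')
print(multi_modal_content_identifier(b'\x89PNG...'))  # some 6-char hex tag derived from the bytes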
@@ -648,8 +657,6 @@ async def process_function_tools(  # noqa C901
         for tool, call in calls_to_run
     ]
 
-    file_index = 1
-
     pending = tasks
     while pending:
         done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
@@ -661,17 +668,38 @@ async def process_function_tools(  # noqa C901
             if isinstance(result, _messages.RetryPromptPart):
                 results_by_index[index] = result
             elif isinstance(result, _messages.ToolReturnPart):
-                if isinstance(result.content, _messages.MultiModalContentTypes):
-                    user_parts.append(
-                        _messages.UserPromptPart(
-                            content=[f'This is file {file_index}:', result.content],
-                            timestamp=result.timestamp,
-                            part_kind='user-prompt',
+                contents: list[Any]
+                single_content: bool
+                if isinstance(result.content, list):
+                    contents = result.content  # type: ignore
+                    single_content = False
+                else:
+                    contents = [result.content]
+                    single_content = True
+
+                processed_contents: list[Any] = []
+                for content in contents:
+                    if isinstance(content, _messages.MultiModalContentTypes):
+                        if isinstance(content, _messages.BinaryContent):
+                            identifier = multi_modal_content_identifier(content.data)
+                        else:
+                            identifier = multi_modal_content_identifier(content.url)
+
+                        user_parts.append(
+                            _messages.UserPromptPart(
+                                content=[f'This is file {identifier}:', content],
+                                timestamp=result.timestamp,
+                                part_kind='user-prompt',
+                            )
                         )
-                    )
+                        processed_contents.append(f'See file {identifier}')
+                    else:
+                        processed_contents.append(content)
 
-                    result.content = f'See file {file_index}.'
-                    file_index += 1
+                if single_content:
+                    result.content = processed_contents[0]
+                else:
+                    result.content = processed_contents
 
                 results_by_index[index] = result
             else:
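The net effect of this rewrite: instead of a run-scoped counter (file 1, file 2, ...) that could drift across retries and parallel tool calls, each attachment is tagged by a content-derived identifier, and list-valued tool returns are now handled item by item. A simplified standalone sketch of the transformation, using a stand-in class rather than the real _messages.BinaryContent:

from __future__ import annotations

import hashlib
from dataclasses import dataclass


@dataclass
class FakeBinaryContent:  # stand-in for _messages.BinaryContent
    data: bytes
    media_type: str


def rewrite_tool_return(content: object) -> tuple[object, list[str]]:
    """Replace binary payloads with 'See file <id>' and collect user-prompt texts."""
    user_parts: list[str] = []
    items = content if isinstance(content, list) else [content]
    processed: list[object] = []
    for item in items:
        if isinstance(item, FakeBinaryContent):
            ident = hashlib.sha1(item.data).hexdigest()[:6]
            # the real code also attaches the content itself to the user prompt part
            user_parts.append(f'This is file {ident}:')
            processed.append(f'See file {ident}')
        else:
            processed.append(item)
    return (processed if isinstance(content, list) else processed[0]), user_parts


result, parts = rewrite_tool_return([FakeBinaryContent(b'\x89PNG...', 'image/png'), 'a caption'])
print(result)  # ['See file <tag>', 'a caption']
print(parts)   # ['This is file <tag>:']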
@@ -710,7 +738,7 @@ async def _tool_from_mcp_server(
     for server in ctx.deps.mcp_servers:
         tools = await server.list_tools()
         if tool_name in {tool.name for tool in tools}:
-            return Tool(name=tool_name, function=run_tool, takes_ctx=True)
+            return Tool(name=tool_name, function=run_tool, takes_ctx=True, max_retries=ctx.deps.default_retries)
     return None
 
 
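The `default_retries` field threaded through `GraphAgentDeps` feeds the `max_retries=ctx.deps.default_retries` argument above, so tools discovered from an MCP server now honor the agent-level retry count instead of the `Tool` default. A minimal sketch of how that surfaces at the API level (server command and model name are illustrative):

from pydantic_ai import Agent
from pydantic_ai.mcp import MCPServerStdio

# Hypothetical server command, for illustration only
server = MCPServerStdio('python', args=['my_mcp_server.py'])

# With this release, retries=3 also applies to tools listed by the MCP server
agent = Agent('openai:gpt-4o', mcp_servers=[server], retries=3)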
pydantic_ai/agent.py CHANGED
@@ -658,6 +658,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             output_validators=output_validators,
             function_tools=self._function_tools,
             mcp_servers=self._mcp_servers,
+            default_retries=self._default_retries,
             tracer=tracer,
             get_instructions=get_instructions,
         )
pydantic_ai/mcp.py CHANGED
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import base64
+import json
 from abc import ABC, abstractmethod
 from collections.abc import AsyncIterator, Sequence
 from contextlib import AsyncExitStack, asynccontextmanager
@@ -9,16 +11,25 @@ from types import TracebackType
 from typing import Any
 
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
-from mcp.types import JSONRPCMessage, LoggingLevel
-from typing_extensions import Self
-
+from mcp.types import (
+    BlobResourceContents,
+    EmbeddedResource,
+    ImageContent,
+    JSONRPCMessage,
+    LoggingLevel,
+    TextContent,
+    TextResourceContents,
+)
+from typing_extensions import Self, assert_never
+
+from pydantic_ai.exceptions import ModelRetry
+from pydantic_ai.messages import BinaryContent
 from pydantic_ai.tools import ToolDefinition
 
 try:
     from mcp.client.session import ClientSession
     from mcp.client.sse import sse_client
     from mcp.client.stdio import StdioServerParameters, stdio_client
-    from mcp.types import CallToolResult
 except ImportError as _import_error:
     raise ImportError(
         'Please install the `mcp` package to use the MCP server, '
@@ -74,7 +85,9 @@ class MCPServer(ABC):
             for tool in tools.tools
         ]
 
-    async def call_tool(self, tool_name: str, arguments: dict[str, Any]) -> CallToolResult:
+    async def call_tool(
+        self, tool_name: str, arguments: dict[str, Any]
+    ) -> str | BinaryContent | dict[str, Any] | list[Any] | Sequence[str | BinaryContent | dict[str, Any] | list[Any]]:
         """Call a tool on the server.
 
         Args:
@@ -83,8 +96,21 @@ class MCPServer(ABC):
 
         Returns:
             The result of the tool call.
+
+        Raises:
+            ModelRetry: If the tool call fails.
         """
-        return await self._client.call_tool(tool_name, arguments)
+        result = await self._client.call_tool(tool_name, arguments)
+
+        content = [self._map_tool_result_part(part) for part in result.content]
+
+        if result.isError:
+            text = '\n'.join(str(part) for part in content)
+            raise ModelRetry(text)
+
+        if len(content) == 1:
+            return content[0]
+        return content
 
     async def __aenter__(self) -> Self:
         self._exit_stack = AsyncExitStack()
@@ -105,6 +131,35 @@ class MCPServer(ABC):
             await self._exit_stack.aclose()
         self.is_running = False
 
+    def _map_tool_result_part(
+        self, part: TextContent | ImageContent | EmbeddedResource
+    ) -> str | BinaryContent | dict[str, Any] | list[Any]:
+        # See https://github.com/jlowin/fastmcp/blob/main/docs/servers/tools.mdx#return-values
+
+        if isinstance(part, TextContent):
+            text = part.text
+            if text.startswith(('[', '{')):
+                try:
+                    return json.loads(text)
+                except ValueError:
+                    pass
+            return text
+        elif isinstance(part, ImageContent):
+            return BinaryContent(data=base64.b64decode(part.data), media_type=part.mimeType)
+        elif isinstance(part, EmbeddedResource):
+            resource = part.resource
+            if isinstance(resource, TextResourceContents):
+                return resource.text
+            elif isinstance(resource, BlobResourceContents):
+                return BinaryContent(
+                    data=base64.b64decode(resource.blob),
+                    media_type=resource.mimeType or 'application/octet-stream',
+                )
+            else:
+                assert_never(resource)
+        else:
+            assert_never(part)
+
 
 @dataclass
 class MCPServerStdio(MCPServer):
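Note the text-mapping heuristic in `_map_tool_result_part`: a `TextContent` payload that starts with `[` or `{` is speculatively parsed as JSON, falling back to the raw string. A minimal sketch of just that rule:

import json


def map_text(text: str):
    # Same heuristic: only attempt JSON when the text could plausibly be a JSON array/object
    if text.startswith(('[', '{')):
        try:
            return json.loads(text)
        except ValueError:
            pass
    return text


assert map_text('{"temp": 21}') == {'temp': 21}  # structured results survive the MCP text transport
assert map_text('{not json') == '{not json'      # parse failure falls back to the raw string
assert map_text('plain text') == 'plain text'    # non-JSON-looking text is returned unchanged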
pydantic_ai/messages.py CHANGED
@@ -832,4 +832,6 @@ class FunctionToolResultEvent:
     """Event type identifier, used as a discriminator."""
 
 
-HandleResponseEvent = Annotated[Union[FunctionToolCallEvent, FunctionToolResultEvent], pydantic.Discriminator('kind')]
+HandleResponseEvent = Annotated[
+    Union[FunctionToolCallEvent, FunctionToolResultEvent], pydantic.Discriminator('event_kind')
+]
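This fixes the discriminator to name the field the event classes actually define (`event_kind`, not `kind`). A minimal illustration of the pattern with stand-in classes, not the real event types:

from typing import Annotated, Literal, Union

import pydantic


class CallEvent(pydantic.BaseModel):
    event_kind: Literal['function_tool_call'] = 'function_tool_call'


class ResultEvent(pydantic.BaseModel):
    event_kind: Literal['function_tool_result'] = 'function_tool_result'


# Validation selects the union member by inspecting the named field, so the name must match
Event = Annotated[Union[CallEvent, ResultEvent], pydantic.Discriminator('event_kind')]
adapter = pydantic.TypeAdapter(Event)
assert isinstance(adapter.validate_python({'event_kind': 'function_tool_result'}), ResultEvent)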
pydantic_ai/models/anthropic.py CHANGED
@@ -90,7 +90,7 @@ See [the Anthropic docs](https://docs.anthropic.com/en/docs/about-claude/models)
 """
 
 
-class AnthropicModelSettings(ModelSettings):
+class AnthropicModelSettings(ModelSettings, total=False):
    """Settings used for an Anthropic model request.
 
    ALL FIELDS MUST BE `anthropic_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
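The `total=False` additions here (and on the Cohere, Gemini, Groq, and Mistral settings below) matter because `TypedDict` totality applies per class body and is not inherited: without it, every provider-prefixed field would be required by type checkers even though `ModelSettings` itself is `total=False`. A small sketch of the distinction:

from typing_extensions import TypedDict


class Base(TypedDict, total=False):
    timeout: float  # optional


class WithoutTotal(Base):
    provider_field: int  # required: totality does not carry over from Base


class WithTotal(Base, total=False):
    provider_field: int  # optional, matching the intent of the settings classes


ok: WithTotal = {}        # type checks: all fields optional
# bad: WithoutTotal = {}  # a type checker would flag the missing 'provider_field'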
@@ -109,10 +109,6 @@ class AnthropicModel(Model):
     Internally, this uses the [Anthropic Python client](https://github.com/anthropics/anthropic-sdk-python) to interact with the API.
 
     Apart from `__init__`, all methods are private or match those of the base class.
-
-    !!! note
-        The `AnthropicModel` class does not yet support streaming responses.
-        We anticipate adding support for streaming responses in a near-term future release.
     """
 
     client: AsyncAnthropic = field(repr=False)
@@ -225,6 +221,8 @@ class AnthropicModel(Model):
         system_prompt, anthropic_messages = await self._map_message(messages)
 
         try:
+            extra_headers = model_settings.get('extra_headers', {})
+            extra_headers.setdefault('User-Agent', get_user_agent())
             return await self.client.messages.create(
                 max_tokens=model_settings.get('max_tokens', 1024),
                 system=system_prompt or NOT_GIVEN,
@@ -238,7 +236,7 @@ class AnthropicModel(Model):
                 top_p=model_settings.get('top_p', NOT_GIVEN),
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 metadata=model_settings.get('anthropic_metadata', NOT_GIVEN),
-                extra_headers={'User-Agent': get_user_agent()},
+                extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
@@ -409,13 +407,27 @@ def _map_usage(message: AnthropicMessage | RawMessageStreamEvent) -> usage.Usage
     if response_usage is None:
         return usage.Usage()
 
-    request_tokens = getattr(response_usage, 'input_tokens', None)
+    # Store all integer-typed usage values in the details dict
+    response_usage_dict = response_usage.model_dump()
+    details: dict[str, int] = {}
+    for key, value in response_usage_dict.items():
+        if isinstance(value, int):
+            details[key] = value
+
+    # Usage coming from the RawMessageDeltaEvent doesn't have input token data, hence the getattr call
+    # Tokens are only counted once between input_tokens, cache_creation_input_tokens, and cache_read_input_tokens
+    # This approach maintains request_tokens as the count of all input tokens, with cached counts as details
+    request_tokens = (
+        getattr(response_usage, 'input_tokens', 0)
+        + (getattr(response_usage, 'cache_creation_input_tokens', 0) or 0)  # These can be missing, None, or int
+        + (getattr(response_usage, 'cache_read_input_tokens', 0) or 0)
+    )
 
     return usage.Usage(
-        # Usage coming from the RawMessageDeltaEvent doesn't have input token data, hence this getattr
-        request_tokens=request_tokens,
+        request_tokens=request_tokens or None,
         response_tokens=response_usage.output_tokens,
-        total_tokens=(request_tokens or 0) + response_usage.output_tokens,
+        total_tokens=request_tokens + response_usage.output_tokens,
+        details=details or None,
    )
 
 
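A worked example of the new accounting with made-up numbers: `input_tokens=100`, `cache_creation_input_tokens=40`, and `cache_read_input_tokens=None` yield `request_tokens=140`, while every integer-valued field is echoed into `details`:

# Standalone sketch of the new _map_usage arithmetic (FakeUsage is a stand-in
# for the Anthropic SDK's usage object)
class FakeUsage:
    input_tokens = 100
    cache_creation_input_tokens = 40
    cache_read_input_tokens = None  # can be missing, None, or int
    output_tokens = 25

    def model_dump(self) -> dict:
        return {
            'input_tokens': self.input_tokens,
            'cache_creation_input_tokens': self.cache_creation_input_tokens,
            'cache_read_input_tokens': self.cache_read_input_tokens,
            'output_tokens': self.output_tokens,
        }


u = FakeUsage()
details = {k: v for k, v in u.model_dump().items() if isinstance(v, int)}  # None values dropped
request_tokens = (
    getattr(u, 'input_tokens', 0)
    + (getattr(u, 'cache_creation_input_tokens', 0) or 0)
    + (getattr(u, 'cache_read_input_tokens', 0) or 0)
)
assert request_tokens == 140
assert details == {'input_tokens': 100, 'cache_creation_input_tokens': 40, 'output_tokens': 25}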
pydantic_ai/models/bedrock.py CHANGED
@@ -355,7 +355,7 @@ class BedrockConverseModel(Model):
 
         if max_tokens := model_settings.get('max_tokens'):
             inference_config['maxTokens'] = max_tokens
-        if temperature := model_settings.get('temperature'):
+        if (temperature := model_settings.get('temperature')) is not None:
             inference_config['temperature'] = temperature
         if top_p := model_settings.get('top_p'):
             inference_config['topP'] = top_p
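This closes a falsy-value bug: the old truthiness check silently dropped a legitimate `temperature` of `0.0`, while the explicit `is not None` check keeps it. A two-branch illustration:

settings = {'temperature': 0.0}

if temperature := settings.get('temperature'):
    pass  # never reached: 0.0 is falsy, so the old code silently ignored it

if (temperature := settings.get('temperature')) is not None:
    print('temperature kept:', temperature)  # reached: 0.0 is preserved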
pydantic_ai/models/cohere.py CHANGED
@@ -78,7 +78,7 @@ See [Cohere's docs](https://docs.cohere.com/v2/docs/models) for a list of all av
 """
 
 
-class CohereModelSettings(ModelSettings):
+class CohereModelSettings(ModelSettings, total=False):
     """Settings used for a Cohere model request.
 
     ALL FIELDS MUST BE `cohere_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
pydantic_ai/models/gemini.py CHANGED
@@ -73,7 +73,7 @@ See [the Gemini API docs](https://ai.google.dev/gemini-api/docs/models/gemini#mo
 """
 
 
-class GeminiModelSettings(ModelSettings):
+class GeminiModelSettings(ModelSettings, total=False):
     """Settings used for a Gemini model request.
 
     ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
@@ -81,6 +81,18 @@ class GeminiModelSettings(ModelSettings):
 
     gemini_safety_settings: list[GeminiSafetySettings]
 
+    gemini_thinking_config: ThinkingConfig
+    """Thinking is "on" by default in both the API and AI Studio.
+
+    Being on by default doesn't mean the model will send back thoughts. For that, you would need to set `include_thoughts`
+    to `True`, but since end of January 2025, `thoughts` are not returned anymore, and are only displayed in the Google
+    AI Studio. See https://discuss.ai.google.dev/t/thoughts-are-missing-cot-not-included-anymore/63653 for more details.
+
+    If you want to avoid the model spending any tokens on thinking, you can set `thinking_budget` to `0`.
+
+    See more about it on <https://ai.google.dev/gemini-api/docs/thinking>.
+    """
+
 
 @dataclass(init=False)
 class GeminiModel(Model):
@@ -223,7 +235,9 @@ class GeminiModel(Model):
             generation_config['presence_penalty'] = presence_penalty
         if (frequency_penalty := model_settings.get('frequency_penalty')) is not None:
             generation_config['frequency_penalty'] = frequency_penalty
-        if (gemini_safety_settings := model_settings.get('gemini_safety_settings')) != []:
+        if (thinkingConfig := model_settings.get('gemini_thinking_config')) is not None:
+            generation_config['thinking_config'] = thinkingConfig  # pragma: no cover
+        if (gemini_safety_settings := model_settings.get('gemini_safety_settings')) is not None:
             request_data['safetySettings'] = gemini_safety_settings
         if generation_config:
             request_data['generationConfig'] = generation_config
@@ -497,6 +511,16 @@ class GeminiSafetySettings(TypedDict):
     """
 
 
+class ThinkingConfig(TypedDict, total=False):
+    """The thinking features configuration."""
+
+    include_thoughts: Annotated[bool, pydantic.Field(alias='includeThoughts')]
+    """Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available."""
+
+    thinking_budget: Annotated[int, pydantic.Field(alias='thinkingBudget')]
+    """Indicates the thinking budget in tokens."""
+
+
 class _GeminiGenerationConfig(TypedDict, total=False):
     """Schema for an API request to the Gemini API.
 
@@ -511,6 +535,7 @@ class _GeminiGenerationConfig(TypedDict, total=False):
     presence_penalty: float
     frequency_penalty: float
     stop_sequences: list[str]
+    thinking_config: ThinkingConfig
 
 
 class _GeminiContent(TypedDict):
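Putting the new Gemini pieces together, disabling thinking for a run might look like this (model name illustrative):

from pydantic_ai import Agent
from pydantic_ai.models.gemini import GeminiModelSettings

agent = Agent('google-gla:gemini-2.5-flash-preview-04-17')

# thinking_budget=0 asks the model to spend no tokens on thinking;
# include_thoughts requests thoughts in the response where supported.
result = agent.run_sync(
    'What is the capital of France?',
    model_settings=GeminiModelSettings(gemini_thinking_config={'thinking_budget': 0}),
)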
pydantic_ai/models/groq.py CHANGED
@@ -82,7 +82,7 @@ See <https://console.groq.com/docs/models> for an up to date date list of models
 """
 
 
-class GroqModelSettings(ModelSettings):
+class GroqModelSettings(ModelSettings, total=False):
     """Settings used for a Groq model request.
 
     ALL FIELDS MUST BE `groq_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
@@ -200,6 +200,8 @@ class GroqModel(Model):
         groq_messages = self._map_messages(messages)
 
         try:
+            extra_headers = model_settings.get('extra_headers', {})
+            extra_headers.setdefault('User-Agent', get_user_agent())
             return await self.client.chat.completions.create(
                 model=str(self._model_name),
                 messages=groq_messages,
@@ -217,7 +219,7 @@ class GroqModel(Model):
                 presence_penalty=model_settings.get('presence_penalty', NOT_GIVEN),
                 frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
                 logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
-                extra_headers={'User-Agent': get_user_agent()},
+                extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
pydantic_ai/models/mistral.py CHANGED
@@ -91,7 +91,7 @@ Since [the Mistral docs](https://docs.mistral.ai/getting-started/models/models_o
 """
 
 
-class MistralModelSettings(ModelSettings):
+class MistralModelSettings(ModelSettings, total=False):
     """Settings used for a Mistral model request.
 
     ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
pydantic_ai/models/openai.py CHANGED
@@ -264,6 +264,8 @@ class OpenAIModel(Model):
         openai_messages = await self._map_messages(messages)
 
         try:
+            extra_headers = model_settings.get('extra_headers', {})
+            extra_headers.setdefault('User-Agent', get_user_agent())
             return await self.client.chat.completions.create(
                 model=self._model_name,
                 messages=openai_messages,
@@ -284,7 +286,7 @@ class OpenAIModel(Model):
                 logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
                 reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
                 user=model_settings.get('openai_user', NOT_GIVEN),
-                extra_headers={'User-Agent': get_user_agent()},
+                extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
@@ -610,6 +612,8 @@ class OpenAIResponsesModel(Model):
         reasoning = self._get_reasoning(model_settings)
 
         try:
+            extra_headers = model_settings.get('extra_headers', {})
+            extra_headers.setdefault('User-Agent', get_user_agent())
             return await self.client.responses.create(
                 input=openai_messages,
                 model=self._model_name,
@@ -625,7 +629,7 @@ class OpenAIResponsesModel(Model):
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
-                extra_headers={'User-Agent': get_user_agent()},
+                extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
pydantic_ai/providers/mistral.py CHANGED
@@ -44,6 +44,7 @@ class MistralProvider(Provider[Mistral]):
         *,
         api_key: str | None = None,
         mistral_client: Mistral | None = None,
+        base_url: str | None = None,
         http_client: AsyncHTTPClient | None = None,
     ) -> None:
         """Create a new Mistral provider.
@@ -52,11 +53,13 @@ class MistralProvider(Provider[Mistral]):
             api_key: The API key to use for authentication, if not provided, the `MISTRAL_API_KEY` environment variable
                 will be used if available.
             mistral_client: An existing `Mistral` client to use, if provided, `api_key` and `http_client` must be `None`.
+            base_url: The base url for the Mistral requests.
             http_client: An existing async client to use for making HTTP requests.
         """
         if mistral_client is not None:
             assert http_client is None, 'Cannot provide both `mistral_client` and `http_client`'
             assert api_key is None, 'Cannot provide both `mistral_client` and `api_key`'
+            assert base_url is None, 'Cannot provide both `mistral_client` and `base_url`'
             self._client = mistral_client
         else:
             api_key = api_key or os.environ.get('MISTRAL_API_KEY')
@@ -67,7 +70,7 @@ class MistralProvider(Provider[Mistral]):
                     'to use the Mistral provider.'
                 )
             elif http_client is not None:
-                self._client = Mistral(api_key=api_key, async_client=http_client)
+                self._client = Mistral(api_key=api_key, async_client=http_client, server_url=base_url)
             else:
                 http_client = cached_async_http_client(provider='mistral')
-                self._client = Mistral(api_key=api_key, async_client=http_client)
+                self._client = Mistral(api_key=api_key, async_client=http_client, server_url=base_url)
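With the new parameter, pointing the provider at a proxy or self-hosted endpoint is a one-liner; note that `base_url` is forwarded to the Mistral SDK as `server_url` (URL illustrative):

from pydantic_ai.models.mistral import MistralModel
from pydantic_ai.providers.mistral import MistralProvider

provider = MistralProvider(api_key='my-api-key', base_url='https://my-mistral-proxy.example.com')
model = MistralModel('mistral-large-latest', provider=provider)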
pydantic_ai/settings.py CHANGED
@@ -141,6 +141,16 @@ class ModelSettings(TypedDict, total=False):
     * Cohere
     """
 
+    extra_headers: dict[str, str]
+    """Extra headers to send to the model.
+
+    Supported by:
+
+    * OpenAI
+    * Anthropic
+    * Groq
+    """
+
     extra_body: object
     """Extra body to send to the model.
 
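Combined with the `setdefault('User-Agent', ...)` calls in the model classes above, a caller-supplied `User-Agent` now wins while other headers ride along (header values illustrative):

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')

# Per-run extra headers; the models only setdefault 'User-Agent',
# so an explicit value here overrides the library default.
result = agent.run_sync(
    'Hello',
    model_settings={'extra_headers': {'X-Request-Source': 'billing-report', 'User-Agent': 'my-app/1.0'}},
)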
pydantic_ai_slim-0.1.8.dist-info/METADATA → pydantic_ai_slim-0.1.10.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.1.8
+Version: 0.1.10
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.1.8
+Requires-Dist: pydantic-graph==0.1.10
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.1.8; extra == 'evals'
+Requires-Dist: pydantic-evals==0.1.10; extra == 'evals'
 Provides-Extra: groq
 Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
pydantic_ai_slim-0.1.8.dist-info/RECORD → pydantic_ai_slim-0.1.10.dist-info/RECORD RENAMED
@@ -1,6 +1,6 @@
 pydantic_ai/__init__.py,sha256=5flxyMQJVrHRMQ3MYaZf1el2ctNs0JmPClKbw2Q-Lsk,1160
 pydantic_ai/__main__.py,sha256=AW8FzscUWPFtIrQBG0QExLxTQehKtt5FnFVnOT200OE,122
-pydantic_ai/_agent_graph.py,sha256=2DKbmiiA1OooCXQ9WYh7sup0XpYxuvEtPiVsweMnXIU,33880
+pydantic_ai/_agent_graph.py,sha256=GDGADjkGblUfJzn2YXUlwHYRZWe6ZUKYBTmQasHAYzo,35303
 pydantic_ai/_cli.py,sha256=j3uSu5lZMNKb876HHMHwVZc1nzJPn6NER5ysrDMFrvo,10730
 pydantic_ai/_griffe.py,sha256=Sf_DisE9k2TA0VFeVIK2nf1oOct5MygW86PBCACJkFA,5244
 pydantic_ai/_output.py,sha256=w_kBc5Lx5AmI0APbohxxYgpFd5VAwh6K0IjP7QIOu9U,11209
@@ -8,15 +8,15 @@ pydantic_ai/_parts_manager.py,sha256=HIi6eth7z2g0tOn6iQYc633xMqy4d_xZ8vwka8J8150
 pydantic_ai/_pydantic.py,sha256=1EO1tv-ULj3l_L1qMcC7gIOKTL2e2a-xTbUD_kqKiOg,8921
 pydantic_ai/_system_prompt.py,sha256=602c2jyle2R_SesOrITBDETZqsLk4BZ8Cbo8yEhmx04,1120
 pydantic_ai/_utils.py,sha256=Vlww1AMQMTvFfGRlFKAyvl4VrE24Lk1MH28EwVTWy8c,10122
-pydantic_ai/agent.py,sha256=IVl1uEntcg6qIQbTgD5lXeLogCYR1vKr68tPEDLq5Fs,88853
+pydantic_ai/agent.py,sha256=Ak3S2cvatdMRdOlhB-v8EIeeSGK7Sj-gMPlJXRp8WUE,88904
 pydantic_ai/exceptions.py,sha256=gvbFsFkAzSXOo_d1nfjy09kDHUGv1j5q70Uk-wKYGi8,3167
 pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
 pydantic_ai/format_prompt.py,sha256=qdKep95Sjlr7u1-qag4JwPbjoURbG0GbeU_l5ODTNw4,4466
-pydantic_ai/mcp.py,sha256=l33wV5JaZTXBYEHMxfdFP2e5SlqGw_D7bB90zDNKAdA,9440
-pydantic_ai/messages.py,sha256=QASltyfRh5Rs6zK-QJL7vWAmyuM6gpsQhxnFAxQxlYo,30326
+pydantic_ai/mcp.py,sha256=UsiBsg2ZuFh0OTMc-tvvxzfyW9YiPSIe6h8_KGloxqI,11312
+pydantic_ai/messages.py,sha256=c8TW8hLNR1DO39jEmFbfbIVJInjN-Peqm3PuII4GoPI,30338
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=DgoUd0LqNd9DWPab6iwculKYvZ5JZHuGvToj0kkibvs,27625
-pydantic_ai/settings.py,sha256=n-Y69s0Tkbo2k65ZwNmIu1nMypO9HadqUDLjZgGQS_Q,3374
+pydantic_ai/settings.py,sha256=U2XzZ9y1fIi_L6yCGTugZRxfk7_01rk5GKSgFqySHL4,3520
 pydantic_ai/tools.py,sha256=Tq-Ba5_7Cx3N3iBExFy06JkelZAz_mOi-K8zLXgCBDU,16923
 pydantic_ai/usage.py,sha256=9sqoIv_RVVUhKXQScTDqUJc074gifsuSzc9_NOt7C3g,5394
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -24,16 +24,16 @@ pydantic_ai/common_tools/duckduckgo.py,sha256=Ty9tu1rCwMfGKgz1JAaC2q_4esmL6QvpkH
 pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQXD7E4,2495
 pydantic_ai/models/__init__.py,sha256=1SCuDT6PJn38raFuSiz_8eEyglCxs54Bq2UGpYBg1s4,19963
 pydantic_ai/models/_json_schema.py,sha256=GXcELRirUtqfpHlNKlRzCR_p6_-Xl7YgIWYCxnshsLA,6414
-pydantic_ai/models/anthropic.py,sha256=YiFBUOzZEs2WAks2JnmO1Wfj_Wu3GuSJL91-BoOXiXs,20333
-pydantic_ai/models/bedrock.py,sha256=QA5yBhyV0l7ZrT1NhJvFzX0mOlLqlrRCD5Nqss3F-8k,25004
-pydantic_ai/models/cohere.py,sha256=0clDIXPETo0pjfNgtI-sWqjjZWPkHqFalag_nN3HSNI,11685
+pydantic_ai/models/anthropic.py,sha256=SQiAaVB79PCNBWJOdIaUR0tHP74zYYdEKDQOB5zyx0s,21005
+pydantic_ai/models/bedrock.py,sha256=Zu7AmxD3b-hPok8xzGw8QZGyR7CRAfsBJTsvcRJiJys,25018
+pydantic_ai/models/cohere.py,sha256=8fABcqGB-aOrfpl_4qLUgkeLV_-Q8-ixN9nui536yO0,11698
 pydantic_ai/models/fallback.py,sha256=sKHyQ1P6zjWORjWgbuhaxyntOfQyDCS8km8FMrlNy3U,4998
 pydantic_ai/models/function.py,sha256=FJvTMwT7p8I_h14ZrudLAI5mmbXxF8AX-Nhz8LXy5U0,11373
-pydantic_ai/models/gemini.py,sha256=78Geduf2rU5zKQeq7f0y95H5hTW9vPdXIhhnCV6QP2Q,35255
-pydantic_ai/models/groq.py,sha256=-_qk5hPUVi3TjA97bZCszME2hyZIVJ11MLGGTamjSMA,17095
+pydantic_ai/models/gemini.py,sha256=rdTtsiRCp0zZ13hAzpZoVy5jJIIm0_JvYwAMR7wRvkg,36621
+pydantic_ai/models/groq.py,sha256=cd1XaBAIuZKer9OGPqf9ffGKXhfcUGCBJxlevNUn29U,17226
 pydantic_ai/models/instrumented.py,sha256=vJxd68CH-PsNNzRGfqfq-UzLMbpgol6BnrkqxYFpZ1I,11490
-pydantic_ai/models/mistral.py,sha256=jcZcw7i0z2j59JuE9S845knDPRqrxNvwMAsemEAOPRs,28974
-pydantic_ai/models/openai.py,sha256=mKAIAO0n827b9OVvb6QhREnZDzjRersWZP9pnURqBrQ,48056
+pydantic_ai/models/mistral.py,sha256=vlHM0bAtRm8076H-uWsf-VG_q4RmiYDai1sanqlIdbo,28987
+pydantic_ai/models/openai.py,sha256=4zW6PFsxvx8zaqqDyzmNXgjKT8tu6Lpzb3LvWLQkua0,48292
 pydantic_ai/models/test.py,sha256=_Fd7oKNA2p_1zXBMvQStnizGlx-ii-zisJx1nIEZn7c,16973
 pydantic_ai/models/wrapper.py,sha256=8wm4RF-MRZOxRVLefwMsxToopCX5Y4Xq2-Ugs5MtCK4,1710
 pydantic_ai/providers/__init__.py,sha256=lLlHq6B8qmu6Ag5biaZmJGDKELO46KjwP7-CDrz_T4Y,2592
@@ -45,9 +45,9 @@ pydantic_ai/providers/deepseek.py,sha256=_5JPzDGWsyVyTBX-yYYdy5aZwUOWNCVgoWI-UoB
 pydantic_ai/providers/google_gla.py,sha256=MJM7aRZRdP4kFlNg0ZHgC95O0wH02OQgbNiDQeK9fZo,1600
 pydantic_ai/providers/google_vertex.py,sha256=WAwPxKTARVzs8DFs2veEUOJSur0krDOo9-JWRHvfHew,9135
 pydantic_ai/providers/groq.py,sha256=DoY6qkfhuemuKB5JXhUkqG-3t1HQkxwSXoE_kHQIAK0,2788
-pydantic_ai/providers/mistral.py,sha256=fcR1uSwORo0jtevX7-wOjvcfT8ojMAaKY81uN5uYymM,2661
+pydantic_ai/providers/mistral.py,sha256=FAS7yKn26yWy7LTmEiBSvqe0HpTXi8_nIf824vE6RFQ,2892
 pydantic_ai/providers/openai.py,sha256=ePF-QWwLkGkSE5w245gTTDVR3VoTIUqFoIhQ0TAoUiA,2866
-pydantic_ai_slim-0.1.8.dist-info/METADATA,sha256=Iu_mDDcuYRSoDW0IpokZRW0HGWkwVaBoiK-6MBx_71o,3551
-pydantic_ai_slim-0.1.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydantic_ai_slim-0.1.8.dist-info/entry_points.txt,sha256=KxQSmlMS8GMTkwTsl4_q9a5nJvBjj3HWeXx688wLrKg,45
-pydantic_ai_slim-0.1.8.dist-info/RECORD,,
+pydantic_ai_slim-0.1.10.dist-info/METADATA,sha256=9g0uS79cNZectheZJ9cRHrMiNm0-Gtk1IUnc-PVa4tg,3554
+pydantic_ai_slim-0.1.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.1.10.dist-info/entry_points.txt,sha256=KxQSmlMS8GMTkwTsl4_q9a5nJvBjj3HWeXx688wLrKg,45
+pydantic_ai_slim-0.1.10.dist-info/RECORD,,