pydantic-ai-slim 0.0.45__tar.gz → 0.0.46__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of pydantic-ai-slim might be problematic.
Files changed (50):
  1. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/PKG-INFO +2 -2
  2. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/agent.py +2 -2
  3. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/mcp.py +25 -1
  4. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/__init__.py +15 -6
  5. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/anthropic.py +2 -1
  6. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/azure.py +1 -1
  7. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/cohere.py +2 -3
  8. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/deepseek.py +2 -1
  9. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/google_gla.py +1 -1
  10. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/google_vertex.py +1 -1
  11. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/groq.py +2 -3
  12. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/mistral.py +2 -1
  13. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/openai.py +2 -1
  14. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pyproject.toml +2 -2
  15. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/.gitignore +0 -0
  16. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/README.md +0 -0
  17. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/__init__.py +0 -0
  18. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/_agent_graph.py +0 -0
  19. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/_cli.py +0 -0
  20. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/_griffe.py +0 -0
  21. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/_parts_manager.py +0 -0
  22. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/_pydantic.py +0 -0
  23. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/_result.py +0 -0
  24. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/_system_prompt.py +0 -0
  25. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/_utils.py +0 -0
  26. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/common_tools/__init__.py +0 -0
  27. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  28. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/common_tools/tavily.py +0 -0
  29. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/exceptions.py +0 -0
  30. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/format_as_xml.py +0 -0
  31. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/messages.py +0 -0
  32. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/anthropic.py +0 -0
  33. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/bedrock.py +0 -0
  34. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/cohere.py +0 -0
  35. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/fallback.py +0 -0
  36. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/function.py +0 -0
  37. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/gemini.py +0 -0
  38. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/groq.py +0 -0
  39. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/instrumented.py +0 -0
  40. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/mistral.py +0 -0
  41. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/openai.py +0 -0
  42. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/test.py +0 -0
  43. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/models/wrapper.py +0 -0
  44. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/__init__.py +0 -0
  45. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/providers/bedrock.py +0 -0
  46. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/py.typed +0 -0
  47. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/result.py +0 -0
  48. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/settings.py +0 -0
  49. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/tools.py +0 -0
  50. {pydantic_ai_slim-0.0.45 → pydantic_ai_slim-0.0.46}/pydantic_ai/usage.py +0 -0
--- pydantic_ai_slim-0.0.45/PKG-INFO
+++ pydantic_ai_slim-0.0.46/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.45
+Version: 0.0.46
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.0.45
+Requires-Dist: pydantic-graph==0.0.46
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
--- pydantic_ai_slim-0.0.45/pydantic_ai/agent.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/agent.py
@@ -13,7 +13,7 @@ from pydantic.json_schema import GenerateJsonSchema
 from typing_extensions import TypeGuard, TypeVar, deprecated
 
 from pydantic_graph import End, Graph, GraphRun, GraphRunContext
-from pydantic_graph._utils import run_until_complete
+from pydantic_graph._utils import get_event_loop
 
 from . import (
     _agent_graph,
@@ -567,7 +567,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         """
         if infer_name and self.name is None:
             self._infer_name(inspect.currentframe())
-        return run_until_complete(
+        return get_event_loop().run_until_complete(
            self.run(
                user_prompt,
                result_type=result_type,
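
The `run_sync` change above replaces a helper that drove the coroutine itself with an explicit `get_event_loop().run_until_complete(...)` call. The diff doesn't show `pydantic_graph._utils.get_event_loop`, but the call shape suggests a cached, reusable event loop; a minimal hypothetical sketch of that pattern:

# Hypothetical sketch of the loop-reuse pattern implied by `get_event_loop()`;
# the real pydantic_graph._utils implementation is not shown in this diff.
import asyncio

_event_loop: asyncio.AbstractEventLoop | None = None


def get_event_loop() -> asyncio.AbstractEventLoop:
    """Return a shared event loop, creating a new one only if none exists or it was closed."""
    global _event_loop
    if _event_loop is None or _event_loop.is_closed():
        _event_loop = asyncio.new_event_loop()
    return _event_loop


def run_sync_example(coro):
    # Mirrors the new `Agent.run_sync` call shape: drive the coroutine on the shared loop.
    return get_event_loop().run_until_complete(coro)

Reusing one loop across `run_sync` calls avoids creating and tearing down a fresh loop on every synchronous run; this sketch is an assumption about the helper's behavior, not its actual implementation.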
--- pydantic_ai_slim-0.0.45/pydantic_ai/mcp.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/mcp.py
@@ -188,11 +188,35 @@ class MCPServerHTTP(MCPServer):
     For example for a server running locally, this might be `http://localhost:3001/sse`.
     """
 
+    headers: dict[str, Any] | None = None
+    """Optional HTTP headers to be sent with each request to the SSE endpoint.
+
+    These headers will be passed directly to the underlying `httpx.AsyncClient`.
+    Useful for authentication, custom headers, or other HTTP-specific configurations.
+    """
+
+    timeout: float = 5
+    """Initial connection timeout in seconds for establishing the SSE connection.
+
+    This timeout applies to the initial connection setup and handshake.
+    If the connection cannot be established within this time, the operation will fail.
+    """
+
+    sse_read_timeout: float = 60 * 5
+    """Maximum time in seconds to wait for new SSE messages before timing out.
+
+    This timeout applies to the long-lived SSE connection after it's established.
+    If no new messages are received within this time, the connection will be considered stale
+    and may be closed. Defaults to 5 minutes (300 seconds).
+    """
+
     @asynccontextmanager
     async def client_streams(
         self,
     ) -> AsyncIterator[
         tuple[MemoryObjectReceiveStream[JSONRPCMessage | Exception], MemoryObjectSendStream[JSONRPCMessage]]
     ]:  # pragma: no cover
-        async with sse_client(url=self.url) as (read_stream, write_stream):
+        async with sse_client(
+            url=self.url, headers=self.headers, timeout=self.timeout, sse_read_timeout=self.sse_read_timeout
+        ) as (read_stream, write_stream):
             yield read_stream, write_stream
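
Taken together, the new fields let an `MCPServerHTTP` carry auth headers and both timeouts at construction time. A usage sketch (the URL and token are placeholders):

from pydantic_ai.mcp import MCPServerHTTP

server = MCPServerHTTP(
    url='http://localhost:3001/sse',
    headers={'Authorization': 'Bearer my-token'},  # forwarded to the underlying httpx client
    timeout=10,  # seconds allowed for the initial SSE connection
    sse_read_timeout=120,  # seconds to wait between SSE messages before the stream is considered stale
)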
--- pydantic_ai_slim-0.0.45/pydantic_ai/models/__init__.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/models/__init__.py
@@ -431,32 +431,41 @@ def infer_model(model: Model | KnownModelName) -> Model:
     raise UserError(f'Unknown model: {model}')
 
 
-def cached_async_http_client(timeout: int = 600, connect: int = 5) -> httpx.AsyncClient:
-    """Cached HTTPX async client so multiple agents and calls can share the same client.
+def cached_async_http_client(*, provider: str | None = None, timeout: int = 600, connect: int = 5) -> httpx.AsyncClient:
+    """Cached HTTPX async client that creates a separate client for each provider.
+
+    The client is cached based on the provider parameter. If provider is None, it's used for non-provider specific
+    requests (like downloading images). Multiple agents and calls can share the same client when they use the same provider.
 
     There are good reasons why in production you should use a `httpx.AsyncClient` as an async context manager as
     described in [encode/httpx#2026](https://github.com/encode/httpx/pull/2026), but when experimenting or showing
-    examples, it's very useful not to, this allows multiple Agents to use a single client.
+    examples, it's very useful not to.
 
     The default timeouts match those of OpenAI,
     see <https://github.com/openai/openai-python/blob/v1.54.4/src/openai/_constants.py#L9>.
     """
-    client = _cached_async_http_client(timeout=timeout, connect=connect)
+    client = _cached_async_http_client(provider=provider, timeout=timeout, connect=connect)
     if client.is_closed:
         # This happens if the context manager is used, so we need to create a new client.
         _cached_async_http_client.cache_clear()
-        client = _cached_async_http_client(timeout=timeout, connect=connect)
+        client = _cached_async_http_client(provider=provider, timeout=timeout, connect=connect)
     return client
 
 
 @cache
-def _cached_async_http_client(timeout: int = 600, connect: int = 5) -> httpx.AsyncClient:
+def _cached_async_http_client(provider: str | None, timeout: int = 600, connect: int = 5) -> httpx.AsyncClient:
     return httpx.AsyncClient(
+        transport=_cached_async_http_transport(),
         timeout=httpx.Timeout(timeout=timeout, connect=connect),
         headers={'User-Agent': get_user_agent()},
     )
 
 
+@cache
+def _cached_async_http_transport() -> httpx.AsyncHTTPTransport:
+    return httpx.AsyncHTTPTransport()
+
+
 @cache
 def get_user_agent() -> str:
     """Get the user agent string for the HTTP client."""
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/anthropic.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/anthropic.py
@@ -70,4 +70,5 @@ class AnthropicProvider(Provider[AsyncAnthropic]):
         if http_client is not None:
             self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
         else:
-            self._client = AsyncAnthropic(api_key=api_key, http_client=cached_async_http_client())
+            http_client = cached_async_http_client(provider='anthropic')
+            self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/azure.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/azure.py
@@ -97,7 +97,7 @@ class AzureProvider(Provider[AsyncOpenAI]):
                 'Must provide one of the `api_version` argument or the `OPENAI_API_VERSION` environment variable'
             )
 
-        http_client = http_client or cached_async_http_client()
+        http_client = http_client or cached_async_http_client(provider='azure')
         self._client = AsyncAzureOpenAI(
             azure_endpoint=azure_endpoint,
             api_key=api_key,
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/cohere.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/cohere.py
@@ -66,6 +66,5 @@ class CohereProvider(Provider[AsyncClientV2]):
         if http_client is not None:
             self._client = AsyncClientV2(api_key=api_key, httpx_client=http_client, base_url=base_url)
         else:
-            self._client = AsyncClientV2(
-                api_key=api_key, httpx_client=cached_async_http_client(), base_url=base_url
-            )
+            http_client = cached_async_http_client(provider='cohere')
+            self._client = AsyncClientV2(api_key=api_key, httpx_client=http_client, base_url=base_url)
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/deepseek.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/deepseek.py
@@ -65,4 +65,5 @@ class DeepSeekProvider(Provider[AsyncOpenAI]):
         elif http_client is not None:
             self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client)
         else:
-            self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=cached_async_http_client())
+            http_client = cached_async_http_client(provider='deepseek')
+            self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client)
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/google_gla.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/google_gla.py
@@ -39,7 +39,7 @@ class GoogleGLAProvider(Provider[httpx.AsyncClient]):
                 'to use the Google GLA provider.'
             )
 
-        self._client = http_client or cached_async_http_client()
+        self._client = http_client or cached_async_http_client(provider='google-gla')
         self._client.base_url = self.base_url
         # https://cloud.google.com/docs/authentication/api-keys-use#using-with-rest
         self._client.headers['X-Goog-Api-Key'] = api_key
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/google_vertex.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/google_vertex.py
@@ -97,7 +97,7 @@ class GoogleVertexProvider(Provider[httpx.AsyncClient]):
         if service_account_file and service_account_info:
             raise ValueError('Only one of `service_account_file` or `service_account_info` can be provided.')
 
-        self._client = http_client or cached_async_http_client()
+        self._client = http_client or cached_async_http_client(provider='google-vertex')
         self.service_account_file = service_account_file
         self.service_account_info = service_account_info
         self.project_id = project_id
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/groq.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/groq.py
@@ -71,6 +71,5 @@ class GroqProvider(Provider[AsyncGroq]):
         elif http_client is not None:
             self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=http_client)
         else:
-            self._client = AsyncGroq(
-                base_url=self.base_url, api_key=api_key, http_client=cached_async_http_client()
-            )
+            http_client = cached_async_http_client(provider='groq')
+            self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=http_client)
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/mistral.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/mistral.py
@@ -69,4 +69,5 @@ class MistralProvider(Provider[Mistral]):
         elif http_client is not None:
             self._client = Mistral(api_key=api_key, async_client=http_client)
         else:
-            self._client = Mistral(api_key=api_key, async_client=cached_async_http_client())
+            http_client = cached_async_http_client(provider='mistral')
+            self._client = Mistral(api_key=api_key, async_client=http_client)
--- pydantic_ai_slim-0.0.45/pydantic_ai/providers/openai.py
+++ pydantic_ai_slim-0.0.46/pydantic_ai/providers/openai.py
@@ -63,4 +63,5 @@ class OpenAIProvider(Provider[AsyncOpenAI]):
         elif http_client is not None:
             self._client = AsyncOpenAI(base_url=base_url, api_key=api_key, http_client=http_client)
         else:
-            self._client = AsyncOpenAI(base_url=base_url, api_key=api_key, http_client=cached_async_http_client())
+            http_client = cached_async_http_client(provider='openai')
+            self._client = AsyncOpenAI(base_url=base_url, api_key=api_key, http_client=http_client)
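
The nine provider hunks above apply the same mechanical change: when no explicit `http_client` is passed, the provider asks for a cached client keyed by its own name instead of the previously shared anonymous client. A usage sketch (the API key is a placeholder):

from pydantic_ai.providers.openai import OpenAIProvider

provider = OpenAIProvider(api_key='sk-placeholder')
# With no http_client argument, this now resolves internally to
# cached_async_http_client(provider='openai'), so all OpenAIProvider instances
# share one client that is separate from other providers' clients.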
--- pydantic_ai_slim-0.0.45/pyproject.toml
+++ pydantic_ai_slim-0.0.46/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai-slim"
-version = "0.0.45"
+version = "0.0.46"
 description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
 authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
 license = "MIT"
@@ -36,7 +36,7 @@ dependencies = [
     "griffe>=1.3.2",
     "httpx>=0.27",
     "pydantic>=2.10",
-    "pydantic-graph==0.0.45",
+    "pydantic-graph==0.0.46",
     "exceptiongroup; python_version < '3.11'",
     "opentelemetry-api>=1.28.0",
     "typing-inspection>=0.4.0",