pydantic-ai-slim 0.2.17__tar.gz → 0.2.18__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.


Files changed (76)
  1. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/PKG-INFO +5 -5
  2. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_agent_graph.py +10 -0
  3. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/mcp.py +145 -53
  4. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/__init__.py +2 -2
  5. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/anthropic.py +9 -5
  6. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/openai.py +7 -1
  7. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/google.py +1 -1
  8. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pyproject.toml +1 -1
  9. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/.gitignore +0 -0
  10. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/LICENSE +0 -0
  11. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/README.md +0 -0
  12. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/__init__.py +0 -0
  13. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/__main__.py +0 -0
  14. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_a2a.py +0 -0
  15. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_cli.py +0 -0
  16. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_function_schema.py +0 -0
  17. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_griffe.py +0 -0
  18. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_output.py +0 -0
  19. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_parts_manager.py +0 -0
  20. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_system_prompt.py +0 -0
  21. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_utils.py +0 -0
  22. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/agent.py +0 -0
  23. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/common_tools/__init__.py +0 -0
  24. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  25. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/common_tools/tavily.py +0 -0
  26. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/direct.py +0 -0
  27. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/exceptions.py +0 -0
  28. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/ext/__init__.py +0 -0
  29. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/ext/langchain.py +0 -0
  30. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/format_as_xml.py +0 -0
  31. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/format_prompt.py +0 -0
  32. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/messages.py +0 -0
  33. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/bedrock.py +0 -0
  34. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/cohere.py +0 -0
  35. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/fallback.py +0 -0
  36. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/function.py +0 -0
  37. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/gemini.py +0 -0
  38. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/google.py +0 -0
  39. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/groq.py +0 -0
  40. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/instrumented.py +0 -0
  41. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/mistral.py +0 -0
  42. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/test.py +0 -0
  43. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/wrapper.py +0 -0
  44. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/__init__.py +0 -0
  45. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/_json_schema.py +0 -0
  46. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/amazon.py +0 -0
  47. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/anthropic.py +0 -0
  48. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/cohere.py +0 -0
  49. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/deepseek.py +0 -0
  50. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/google.py +0 -0
  51. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/grok.py +0 -0
  52. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/meta.py +0 -0
  53. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/mistral.py +0 -0
  54. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/openai.py +0 -0
  55. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/profiles/qwen.py +0 -0
  56. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/__init__.py +0 -0
  57. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/anthropic.py +0 -0
  58. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/azure.py +0 -0
  59. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/bedrock.py +0 -0
  60. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/cohere.py +0 -0
  61. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/deepseek.py +0 -0
  62. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/fireworks.py +0 -0
  63. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/google_gla.py +0 -0
  64. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/google_vertex.py +0 -0
  65. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/grok.py +0 -0
  66. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/groq.py +0 -0
  67. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/heroku.py +0 -0
  68. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/mistral.py +0 -0
  69. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/openai.py +0 -0
  70. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/openrouter.py +0 -0
  71. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/together.py +0 -0
  72. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/py.typed +0 -0
  73. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/result.py +0 -0
  74. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/settings.py +0 -0
  75. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/tools.py +0 -0
  76. {pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/usage.py +0 -0

{pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.2.17
+Version: 0.2.18
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.2.17
+Requires-Dist: pydantic-graph==0.2.18
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.2.17; extra == 'a2a'
+Requires-Dist: fasta2a==0.2.18; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.2.17; extra == 'evals'
+Requires-Dist: pydantic-evals==0.2.18; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
@@ -56,7 +56,7 @@ Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.11.0; extra == 'logfire'
 Provides-Extra: mcp
-Requires-Dist: mcp>=1.9.2; (python_version >= '3.10') and extra == 'mcp'
+Requires-Dist: mcp>=1.9.4; (python_version >= '3.10') and extra == 'mcp'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
 Provides-Extra: openai

{pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/_agent_graph.py
@@ -183,6 +183,16 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
 
         if user_prompt is not None:
             parts.append(_messages.UserPromptPart(user_prompt))
+        elif (
+            len(parts) == 0
+            and message_history
+            and (last_message := message_history[-1])
+            and isinstance(last_message, _messages.ModelRequest)
+        ):
+            # Drop last message that came from history and reuse its parts
+            messages.pop()
+            parts.extend(last_message.parts)
+
         return messages, _messages.ModelRequest(parts, instructions=instructions)
 
     async def _reevaluate_dynamic_prompts(
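This new branch lets a run that supplies no fresh user prompt resume from a `message_history` whose last entry is an unanswered `ModelRequest`: that request's parts are reused instead of appending an empty request. A minimal sketch of the resulting behavior, assuming the public `Agent.run` API (the model name and prompt are illustrative):

```python
from pydantic_ai import Agent
from pydantic_ai.messages import ModelRequest, UserPromptPart

agent = Agent('openai:gpt-4o')

async def main():
    # History ending in a ModelRequest that was never answered by the model.
    history = [ModelRequest(parts=[UserPromptPart('What is the capital of France?')])]
    # No new user prompt: per the hunk above, the trailing request's parts are
    # reused rather than sending an additional empty request.
    result = await agent.run(message_history=history)
    print(result.output)
```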

{pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/mcp.py
@@ -5,25 +5,28 @@ import functools
 import json
 from abc import ABC, abstractmethod
 from collections.abc import AsyncIterator, Sequence
-from contextlib import AsyncExitStack, asynccontextmanager
+from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager
 from dataclasses import dataclass
 from pathlib import Path
 from types import TracebackType
-from typing import Any
+from typing import Any, Callable
 
 import anyio
 import httpx
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
+from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client
 from mcp.shared.message import SessionMessage
 from mcp.types import (
+    AudioContent,
     BlobResourceContents,
+    Content,
     EmbeddedResource,
     ImageContent,
     LoggingLevel,
     TextContent,
     TextResourceContents,
 )
-from typing_extensions import Self, assert_never
+from typing_extensions import Self, assert_never, deprecated
 
 from pydantic_ai.exceptions import ModelRetry
 from pydantic_ai.messages import BinaryContent
@@ -39,7 +42,7 @@ except ImportError as _import_error:
         'you can use the `mcp` optional group — `pip install "pydantic-ai-slim[mcp]"`'
     ) from _import_error
 
-__all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP'
+__all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP', 'MCPServerSSE', 'MCPServerStreamableHTTP'
 
 
 class MCPServer(ABC):
@@ -160,9 +163,7 @@
         await self._exit_stack.aclose()
         self.is_running = False
 
-    def _map_tool_result_part(
-        self, part: TextContent | ImageContent | EmbeddedResource
-    ) -> str | BinaryContent | dict[str, Any] | list[Any]:
+    def _map_tool_result_part(self, part: Content) -> str | BinaryContent | dict[str, Any] | list[Any]:
        # See https://github.com/jlowin/fastmcp/blob/main/docs/servers/tools.mdx#return-values
 
        if isinstance(part, TextContent):
@@ -175,6 +176,10 @@
             return text
         elif isinstance(part, ImageContent):
             return BinaryContent(data=base64.b64decode(part.data), media_type=part.mimeType)
+        elif isinstance(part, AudioContent):
+            # NOTE: The FastMCP server doesn't support audio content.
+            # See <https://github.com/modelcontextprotocol/python-sdk/issues/952> for more details.
+            return BinaryContent(data=base64.b64decode(part.data), media_type=part.mimeType)  # pragma: no cover
         elif isinstance(part, EmbeddedResource):
             resource = part.resource
             if isinstance(resource, TextResourceContents):
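With `AudioContent` handled, an audio tool result is decoded from base64 into a `BinaryContent`, mirroring the image path. A self-contained sketch of just that mapping (the payload is made up; field names follow the `mcp.types.AudioContent` model):

```python
import base64

from mcp.types import AudioContent

from pydantic_ai.messages import BinaryContent

# A made-up MCP audio part, as a server might return it from a tool call.
part = AudioContent(
    type='audio',
    data=base64.b64encode(b'fake-wav-bytes').decode(),
    mimeType='audio/wav',
)
content = BinaryContent(data=base64.b64decode(part.data), media_type=part.mimeType)
assert content.media_type == 'audio/wav'
```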
@@ -287,44 +292,12 @@ class MCPServerStdio(MCPServer):
 
 
 @dataclass
-class MCPServerHTTP(MCPServer):
-    """An MCP server that connects over streamable HTTP connections.
-
-    This class implements the SSE transport from the MCP specification.
-    See <https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse> for more information.
-
-    The name "HTTP" is used since this implemented will be adapted in future to use the new
-    [Streamable HTTP](https://github.com/modelcontextprotocol/specification/pull/206) currently in development.
-
-    !!! note
-        Using this class as an async context manager will create a new pool of HTTP connections to connect
-        to a server which should already be running.
-
-    Example:
-    ```python {py="3.10"}
-    from pydantic_ai import Agent
-    from pydantic_ai.mcp import MCPServerHTTP
-
-    server = MCPServerHTTP('http://localhost:3001/sse')  # (1)!
-    agent = Agent('openai:gpt-4o', mcp_servers=[server])
-
-    async def main():
-        async with agent.run_mcp_servers():  # (2)!
-            ...
-    ```
-
-    1. E.g. you might be connecting to a server run with [`mcp-run-python`](../mcp/run-python.md).
-    2. This will connect to a server running on `localhost:3001`.
-    """
-
+class _MCPServerHTTP(MCPServer):
     url: str
-    """The URL of the SSE endpoint on the MCP server.
-
-    For example for a server running locally, this might be `http://localhost:3001/sse`.
-    """
+    """The URL of the endpoint on the MCP server."""
 
     headers: dict[str, Any] | None = None
-    """Optional HTTP headers to be sent with each request to the SSE endpoint.
+    """Optional HTTP headers to be sent with each request to the endpoint.
 
     These headers will be passed directly to the underlying `httpx.AsyncClient`.
     Useful for authentication, custom headers, or other HTTP-specific configurations.
@@ -336,22 +309,22 @@ class MCPServerHTTP(MCPServer):
     """
 
     http_client: httpx.AsyncClient | None = None
-    """An `httpx.AsyncClient` to use with the SSE endpoint.
+    """An `httpx.AsyncClient` to use with the endpoint.
 
     This client may be configured to use customized connection parameters like self-signed certificates.
 
     !!! note
         You can either pass `headers` or `http_client`, but not both.
 
-        If you want to use both, you can pass the headers to the `http_client` instead:
+        If you want to use both, you can pass the headers to the `http_client` instead.
 
-        ```python {py="3.10"}
+        ```python {py="3.10" test="skip"}
         import httpx
 
-        from pydantic_ai.mcp import MCPServerHTTP
+        from pydantic_ai.mcp import MCPServerSSE
 
         http_client = httpx.AsyncClient(headers={'Authorization': 'Bearer ...'})
-        server = MCPServerHTTP('http://localhost:3001/sse', http_client=http_client)
+        server = MCPServerSSE('http://localhost:3001/sse', http_client=http_client)
         ```
     """
 
@@ -369,10 +342,11 @@
     If no new messages are received within this time, the connection will be considered stale
     and may be closed. Defaults to 5 minutes (300 seconds).
     """
+
     log_level: LoggingLevel | None = None
     """The log level to set when connecting to the server, if any.
 
-    See <https://modelcontextprotocol.io/specification/2025-03-26/server/utilities/logging#logging> for more details.
+    See <https://modelcontextprotocol.io/introduction#logging> for more details.
 
    If `None`, no log level will be set.
    """
@@ -385,6 +359,27 @@
     For example, if `tool_prefix='foo'`, then a tool named `bar` will be registered as `foo_bar`
     """
 
+    @property
+    @abstractmethod
+    def _transport_client(
+        self,
+    ) -> Callable[
+        ...,
+        AbstractAsyncContextManager[
+            tuple[
+                MemoryObjectReceiveStream[SessionMessage | Exception],
+                MemoryObjectSendStream[SessionMessage],
+                GetSessionIdCallback,
+            ],
+        ]
+        | AbstractAsyncContextManager[
+            tuple[
+                MemoryObjectReceiveStream[SessionMessage | Exception],
+                MemoryObjectSendStream[SessionMessage],
+            ]
+        ],
+    ]: ...
+
     @asynccontextmanager
     async def client_streams(
         self,
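The abstract `_transport_client` property is the heart of the refactor: each subclass only supplies the transport's context-manager factory, while the shared `client_streams` logic tolerates either a 2-tuple (SSE) or a 3-tuple (Streamable HTTP, which adds a `GetSessionIdCallback`) by unpacking with `*_`. A toy illustration of that unpacking pattern (everything here is invented for demonstration):

```python
import asyncio
from contextlib import asynccontextmanager

@asynccontextmanager
async def fake_transport():  # stands in for sse_client / streamablehttp_client
    # SSE yields 2 values; Streamable HTTP yields 3 (the extra one is a
    # session-id callback), so consumers must absorb the optional extras.
    yield 'read_stream', 'write_stream', lambda: 'session-id'

async def demo():
    async with fake_transport() as (read_stream, write_stream, *_):
        print(read_stream, write_stream)  # extras, if any, land in _

asyncio.run(demo())
```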
@@ -394,8 +389,8 @@
         if self.http_client and self.headers:
             raise ValueError('`http_client` is mutually exclusive with `headers`.')
 
-        sse_client_partial = functools.partial(
-            sse_client,
+        transport_client_partial = functools.partial(
+            self._transport_client,
             url=self.url,
             timeout=self.timeout,
             sse_read_timeout=self.sse_read_timeout,
@@ -411,17 +406,114 @@
                 assert self.http_client is not None
                 return self.http_client
 
-            async with sse_client_partial(httpx_client_factory=httpx_client_factory) as (read_stream, write_stream):
+            async with transport_client_partial(httpx_client_factory=httpx_client_factory) as (
+                read_stream,
+                write_stream,
+                *_,
+            ):
                 yield read_stream, write_stream
         else:
-            async with sse_client_partial(headers=self.headers) as (read_stream, write_stream):
+            async with transport_client_partial(headers=self.headers) as (read_stream, write_stream, *_):
                 yield read_stream, write_stream
 
     def _get_log_level(self) -> LoggingLevel | None:
         return self.log_level
 
     def __repr__(self) -> str:  # pragma: no cover
-        return f'MCPServerHTTP(url={self.url!r}, tool_prefix={self.tool_prefix!r})'
+        return f'{self.__class__.__name__}(url={self.url!r}, tool_prefix={self.tool_prefix!r})'
 
     def _get_client_initialize_timeout(self) -> float:  # pragma: no cover
         return self.timeout
+
+
+@dataclass
+class MCPServerSSE(_MCPServerHTTP):
+    """An MCP server that connects over streamable HTTP connections.
+
+    This class implements the SSE transport from the MCP specification.
+    See <https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse> for more information.
+
+    !!! note
+        Using this class as an async context manager will create a new pool of HTTP connections to connect
+        to a server which should already be running.
+
+    Example:
+    ```python {py="3.10"}
+    from pydantic_ai import Agent
+    from pydantic_ai.mcp import MCPServerSSE
+
+    server = MCPServerSSE('http://localhost:3001/sse')  # (1)!
+    agent = Agent('openai:gpt-4o', mcp_servers=[server])
+
+    async def main():
+        async with agent.run_mcp_servers():  # (2)!
+            ...
+    ```
+
+    1. E.g. you might be connecting to a server run with [`mcp-run-python`](../mcp/run-python.md).
+    2. This will connect to a server running on `localhost:3001`.
+    """
+
+    @property
+    def _transport_client(self):
+        return sse_client  # pragma: no cover
+
+
+@deprecated('The `MCPServerHTTP` class is deprecated, use `MCPServerSSE` instead.')
+@dataclass
+class MCPServerHTTP(MCPServerSSE):
+    """An MCP server that connects over HTTP using the old SSE transport.
+
+    This class implements the SSE transport from the MCP specification.
+    See <https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse> for more information.
+
+    !!! note
+        Using this class as an async context manager will create a new pool of HTTP connections to connect
+        to a server which should already be running.
+
+    Example:
+    ```python {py="3.10" test="skip"}
+    from pydantic_ai import Agent
+    from pydantic_ai.mcp import MCPServerHTTP
+
+    server = MCPServerHTTP('http://localhost:3001/sse')  # (1)!
+    agent = Agent('openai:gpt-4o', mcp_servers=[server])
+
+    async def main():
+        async with agent.run_mcp_servers():  # (2)!
+            ...
+    ```
+
+    1. E.g. you might be connecting to a server run with [`mcp-run-python`](../mcp/run-python.md).
+    2. This will connect to a server running on `localhost:3001`.
+    """
+
+
+@dataclass
+class MCPServerStreamableHTTP(_MCPServerHTTP):
+    """An MCP server that connects over HTTP using the Streamable HTTP transport.
+
+    This class implements the Streamable HTTP transport from the MCP specification.
+    See <https://modelcontextprotocol.io/introduction#streamable-http> for more information.
+
+    !!! note
+        Using this class as an async context manager will create a new pool of HTTP connections to connect
+        to a server which should already be running.
+
+    Example:
+    ```python {py="3.10"}
+    from pydantic_ai import Agent
+    from pydantic_ai.mcp import MCPServerStreamableHTTP
+
+    server = MCPServerStreamableHTTP('http://localhost:8000/mcp')  # (1)!
+    agent = Agent('openai:gpt-4o', mcp_servers=[server])
+
+    async def main():
+        async with agent.run_mcp_servers():  # (2)!
+            ...
+    ```
+    """
+
+    @property
+    def _transport_client(self):
+        return streamablehttp_client  # pragma: no cover
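Net effect of the mcp.py changes: `MCPServerHTTP` keeps working but now emits a deprecation warning via `typing_extensions.deprecated`, and both concrete transports share everything except `_transport_client`. A hedged migration sketch (URLs are illustrative):

```python
from pydantic_ai.mcp import MCPServerSSE, MCPServerStreamableHTTP

# Drop-in replacement for the deprecated MCPServerHTTP (same SSE transport):
sse_server = MCPServerSSE('http://localhost:3001/sse')

# Or move to the new Streamable HTTP transport if the server supports it:
http_server = MCPServerStreamableHTTP('http://localhost:8000/mcp')
```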

{pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/__init__.py
@@ -555,9 +555,9 @@ def infer_model(model: Model | KnownModelName | str) -> Model:
 
         return OpenAIModel(model_name, provider=provider)
     elif provider in ('google-gla', 'google-vertex'):
-        from .gemini import GeminiModel
+        from .google import GoogleModel
 
-        return GeminiModel(model_name, provider=provider)
+        return GoogleModel(model_name, provider=provider)
     elif provider == 'groq':
         from .groq import GroqModel
 
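This changes what Google model strings resolve to: `infer_model` (and therefore `Agent('google-gla:...')`) now returns the `google-genai`-based `GoogleModel` instead of the legacy `GeminiModel`. A sketch; the model name is illustrative and the printed class name is inferred from the hunk above:

```python
from pydantic_ai.models import infer_model

model = infer_model('google-gla:gemini-2.0-flash')
print(type(model).__name__)  # 'GoogleModel' rather than 'GeminiModel'
```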

{pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/anthropic.py
@@ -276,7 +276,7 @@ class AnthropicModel(Model):
             tools += [self._map_tool_definition(r) for r in model_request_parameters.output_tools]
         return tools
 
-    async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[BetaMessageParam]]:
+    async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[BetaMessageParam]]:  # noqa: C901
         """Just maps a `pydantic_ai.Message` to a `anthropic.types.MessageParam`."""
         system_prompt_parts: list[str] = []
         anthropic_messages: list[BetaMessageParam] = []
@@ -315,7 +315,8 @@ class AnthropicModel(Model):
                 assistant_content_params: list[BetaTextBlockParam | BetaToolUseBlockParam] = []
                 for response_part in m.parts:
                     if isinstance(response_part, TextPart):
-                        assistant_content_params.append(BetaTextBlockParam(text=response_part.content, type='text'))
+                        if response_part.content:  # Only add non-empty text
+                            assistant_content_params.append(BetaTextBlockParam(text=response_part.content, type='text'))
                     else:
                         tool_use_block_param = BetaToolUseBlockParam(
                             id=_guard_tool_call_id(t=response_part),
@@ -324,7 +325,8 @@ class AnthropicModel(Model):
                             input=response_part.args_as_dict(),
                         )
                         assistant_content_params.append(tool_use_block_param)
-                anthropic_messages.append(BetaMessageParam(role='assistant', content=assistant_content_params))
+                if len(assistant_content_params) > 0:
+                    anthropic_messages.append(BetaMessageParam(role='assistant', content=assistant_content_params))
             else:
                 assert_never(m)
         system_prompt = '\n\n'.join(system_prompt_parts)
@@ -337,11 +339,13 @@ class AnthropicModel(Model):
         part: UserPromptPart,
     ) -> AsyncGenerator[BetaContentBlockParam]:
         if isinstance(part.content, str):
-            yield BetaTextBlockParam(text=part.content, type='text')
+            if part.content:  # Only yield non-empty text
+                yield BetaTextBlockParam(text=part.content, type='text')
         else:
             for item in part.content:
                 if isinstance(item, str):
-                    yield BetaTextBlockParam(text=item, type='text')
+                    if item:  # Only yield non-empty text
+                        yield BetaTextBlockParam(text=item, type='text')
                 elif isinstance(item, BinaryContent):
                     if item.is_image:
                         yield BetaImageBlockParam(
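All three guards serve the same purpose: the Anthropic Messages API rejects text blocks with empty `text`, and an assistant turn whose parts were all empty would otherwise be sent as an invalid empty-content message. A self-contained sketch of the filtering logic, with plain dicts standing in for the `Beta*Param` types:

```python
parts = ['', 'real answer', '']

# Skip empty text blocks, mirroring the `if response_part.content:` guards.
assistant_content = [{'type': 'text', 'text': p} for p in parts if p]

messages = []
if len(assistant_content) > 0:  # skip assistant turns with no content at all
    messages.append({'role': 'assistant', 'content': assistant_content})

print(messages)  # [{'role': 'assistant', 'content': [{'type': 'text', 'text': 'real answer'}]}]
```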

{pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/models/openai.py
@@ -613,7 +613,13 @@ class OpenAIResponsesModel(Model):
         for item in response.output:
             if item.type == 'function_call':
                 items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
-        return ModelResponse(items, usage=_map_usage(response), model_name=response.model, timestamp=timestamp)
+        return ModelResponse(
+            items,
+            usage=_map_usage(response),
+            model_name=response.model,
+            vendor_id=response.id,
+            timestamp=timestamp,
+        )
 
     async def _process_streamed_response(
         self, response: AsyncStream[responses.ResponseStreamEvent]
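`OpenAIResponsesModel` now records the Responses API's `response.id` as `vendor_id` on the returned `ModelResponse`, so the provider-side ID survives in message history. A hedged sketch of reading it back after a run (assumes the field is populated as in the hunk above):

```python
from pydantic_ai import Agent
from pydantic_ai.messages import ModelResponse
from pydantic_ai.models.openai import OpenAIResponsesModel

agent = Agent(OpenAIResponsesModel('gpt-4o'))

async def main():
    result = await agent.run('hello')
    for message in result.all_messages():
        if isinstance(message, ModelResponse):
            print(message.vendor_id)  # the Responses API response ID, e.g. 'resp_...'
```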

{pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pydantic_ai/providers/google.py
@@ -84,7 +84,7 @@ class GoogleProvider(Provider[genai.Client]):
         """
         if client is None:
             # NOTE: We are keeping GEMINI_API_KEY for backwards compatibility.
-            api_key = api_key or os.environ.get('GOOGLE_API_KEY')
+            api_key = api_key or os.getenv('GOOGLE_API_KEY') or os.getenv('GEMINI_API_KEY')
 
             if vertexai is None:  # pragma: lax no cover
                 vertexai = bool(location or project or credentials)
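The provider now honors the `GEMINI_API_KEY` fallback its comment promised: an explicit `api_key` argument wins, then `GOOGLE_API_KEY`, then `GEMINI_API_KEY`. A usage sketch, assuming one of the variables is set (the key value is a placeholder):

```python
import os

from pydantic_ai.providers.google import GoogleProvider

os.environ.setdefault('GEMINI_API_KEY', 'your-api-key')  # illustrative only

# No explicit api_key: lookup falls through GOOGLE_API_KEY to GEMINI_API_KEY.
provider = GoogleProvider()
```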

{pydantic_ai_slim-0.2.17 → pydantic_ai_slim-0.2.18}/pyproject.toml
@@ -75,7 +75,7 @@ tavily = ["tavily-python>=0.5.0"]
 # CLI
 cli = ["rich>=13", "prompt-toolkit>=3", "argcomplete>=3.5.0"]
 # MCP
-mcp = ["mcp>=1.9.2; python_version >= '3.10'"]
+mcp = ["mcp>=1.9.4; python_version >= '3.10'"]
 # Evals
 evals = ["pydantic-evals=={{ version }}"]
 # A2A