pydantic-ai-slim 1.0.0b1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pydantic-ai-slim has been flagged as potentially problematic.

Files changed (38)
  1. pydantic_ai/_a2a.py +1 -1
  2. pydantic_ai/_agent_graph.py +65 -49
  3. pydantic_ai/_parts_manager.py +3 -1
  4. pydantic_ai/_tool_manager.py +33 -6
  5. pydantic_ai/ag_ui.py +75 -43
  6. pydantic_ai/agent/__init__.py +10 -7
  7. pydantic_ai/durable_exec/dbos/__init__.py +6 -0
  8. pydantic_ai/durable_exec/dbos/_agent.py +718 -0
  9. pydantic_ai/durable_exec/dbos/_mcp_server.py +89 -0
  10. pydantic_ai/durable_exec/dbos/_model.py +137 -0
  11. pydantic_ai/durable_exec/dbos/_utils.py +10 -0
  12. pydantic_ai/durable_exec/temporal/_agent.py +71 -10
  13. pydantic_ai/exceptions.py +2 -2
  14. pydantic_ai/mcp.py +14 -26
  15. pydantic_ai/messages.py +90 -19
  16. pydantic_ai/models/__init__.py +9 -0
  17. pydantic_ai/models/anthropic.py +28 -11
  18. pydantic_ai/models/bedrock.py +6 -14
  19. pydantic_ai/models/gemini.py +3 -1
  20. pydantic_ai/models/google.py +58 -5
  21. pydantic_ai/models/groq.py +122 -34
  22. pydantic_ai/models/instrumented.py +29 -11
  23. pydantic_ai/models/openai.py +84 -29
  24. pydantic_ai/providers/__init__.py +4 -0
  25. pydantic_ai/providers/bedrock.py +11 -3
  26. pydantic_ai/providers/google_vertex.py +2 -1
  27. pydantic_ai/providers/groq.py +21 -2
  28. pydantic_ai/providers/litellm.py +134 -0
  29. pydantic_ai/retries.py +42 -2
  30. pydantic_ai/tools.py +18 -7
  31. pydantic_ai/toolsets/combined.py +2 -2
  32. pydantic_ai/toolsets/function.py +54 -19
  33. pydantic_ai/usage.py +37 -3
  34. {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.2.dist-info}/METADATA +9 -8
  35. {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.2.dist-info}/RECORD +38 -32
  36. {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.2.dist-info}/WHEEL +0 -0
  37. {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.2.dist-info}/entry_points.txt +0 -0
  38. {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.2.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/durable_exec/dbos/_mcp_server.py ADDED
@@ -0,0 +1,89 @@
+ from __future__ import annotations
+
+ from abc import ABC
+ from collections.abc import Callable
+ from typing import Any
+
+ from dbos import DBOS
+ from typing_extensions import Self
+
+ from pydantic_ai.mcp import MCPServer, ToolResult
+ from pydantic_ai.tools import AgentDepsT, RunContext
+ from pydantic_ai.toolsets.abstract import AbstractToolset, ToolsetTool
+ from pydantic_ai.toolsets.wrapper import WrapperToolset
+
+ from ._utils import StepConfig
+
+
+ class DBOSMCPServer(WrapperToolset[AgentDepsT], ABC):
+     """A wrapper for MCPServer that integrates with DBOS, turning call_tool and get_tools to DBOS steps."""
+
+     def __init__(
+         self,
+         wrapped: MCPServer,
+         *,
+         step_name_prefix: str,
+         step_config: StepConfig,
+     ):
+         super().__init__(wrapped)
+         self._step_config = step_config or {}
+         self._step_name_prefix = step_name_prefix
+         id_suffix = f'__{wrapped.id}' if wrapped.id else ''
+         self._name = f'{step_name_prefix}__mcp_server{id_suffix}'
+
+         # Wrap get_tools in a DBOS step.
+         @DBOS.step(
+             name=f'{self._name}.get_tools',
+             **self._step_config,
+         )
+         async def wrapped_get_tools_step(
+             ctx: RunContext[AgentDepsT],
+         ) -> dict[str, ToolsetTool[AgentDepsT]]:
+             return await super(DBOSMCPServer, self).get_tools(ctx)
+
+         self._dbos_wrapped_get_tools_step = wrapped_get_tools_step
+
+         # Wrap call_tool in a DBOS step.
+         @DBOS.step(
+             name=f'{self._name}.call_tool',
+             **self._step_config,
+         )
+         async def wrapped_call_tool_step(
+             name: str,
+             tool_args: dict[str, Any],
+             ctx: RunContext[AgentDepsT],
+             tool: ToolsetTool[AgentDepsT],
+         ) -> ToolResult:
+             return await super(DBOSMCPServer, self).call_tool(name, tool_args, ctx, tool)
+
+         self._dbos_wrapped_call_tool_step = wrapped_call_tool_step
+
+     @property
+     def id(self) -> str | None:
+         return self.wrapped.id
+
+     async def __aenter__(self) -> Self:
+         # The wrapped MCPServer enters itself around listing and calling tools
+         # so we don't need to enter it here (nor could we because we're not inside a DBOS step).
+         return self
+
+     async def __aexit__(self, *args: Any) -> bool | None:
+         return None
+
+     def visit_and_replace(
+         self, visitor: Callable[[AbstractToolset[AgentDepsT]], AbstractToolset[AgentDepsT]]
+     ) -> AbstractToolset[AgentDepsT]:
+         # DBOS-ified toolsets cannot be swapped out after the fact.
+         return self
+
+     async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
+         return await self._dbos_wrapped_get_tools_step(ctx)
+
+     async def call_tool(
+         self,
+         name: str,
+         tool_args: dict[str, Any],
+         ctx: RunContext[AgentDepsT],
+         tool: ToolsetTool[AgentDepsT],
+     ) -> ToolResult:
+         return await self._dbos_wrapped_call_tool_step(name, tool_args, ctx, tool)
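
The class above only wraps an existing MCP server so that tool listing and tool calls become named DBOS steps. A minimal sketch of instantiating it directly, using nothing beyond the constructor shown in this diff; the server command, prefix, and retry settings are illustrative, and in practice the DBOS agent integration added in this release is presumably expected to do this wiring for you:

from pydantic_ai.mcp import MCPServerStdio
from pydantic_ai.durable_exec.dbos._mcp_server import DBOSMCPServer
from pydantic_ai.durable_exec.dbos._utils import StepConfig

server = MCPServerStdio('uv', args=['run', 'mcp-run-python', 'stdio'])  # illustrative command
dbos_server = DBOSMCPServer(
    server,
    step_name_prefix='my_agent',  # per the diff, steps are named 'my_agent__mcp_server[__<id>].get_tools' / '.call_tool'
    step_config=StepConfig(max_attempts=3),  # optional DBOS retry settings, see _utils.py below
)
# get_tools() and call_tool() on dbos_server now execute inside named DBOS steps.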
pydantic_ai/durable_exec/dbos/_model.py ADDED
@@ -0,0 +1,137 @@
+ from __future__ import annotations
+
+ from collections.abc import AsyncIterator
+ from contextlib import asynccontextmanager
+ from datetime import datetime
+ from typing import Any
+
+ from dbos import DBOS
+
+ from pydantic_ai.agent import EventStreamHandler
+ from pydantic_ai.messages import (
+     ModelMessage,
+     ModelResponse,
+     ModelResponseStreamEvent,
+ )
+ from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
+ from pydantic_ai.models.wrapper import WrapperModel
+ from pydantic_ai.settings import ModelSettings
+ from pydantic_ai.tools import RunContext
+ from pydantic_ai.usage import RequestUsage
+
+ from ._utils import StepConfig
+
+
+ class DBOSStreamedResponse(StreamedResponse):
+     def __init__(self, model_request_parameters: ModelRequestParameters, response: ModelResponse):
+         super().__init__(model_request_parameters)
+         self.response = response
+
+     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
+         return
+         # noinspection PyUnreachableCode
+         yield
+
+     def get(self) -> ModelResponse:
+         return self.response
+
+     def usage(self) -> RequestUsage:
+         return self.response.usage  # pragma: no cover
+
+     @property
+     def model_name(self) -> str:
+         return self.response.model_name or ''  # pragma: no cover
+
+     @property
+     def provider_name(self) -> str:
+         return self.response.provider_name or ''  # pragma: no cover
+
+     @property
+     def timestamp(self) -> datetime:
+         return self.response.timestamp  # pragma: no cover
+
+
+ class DBOSModel(WrapperModel):
+     """A wrapper for Model that integrates with DBOS, turning request and request_stream to DBOS steps."""
+
+     def __init__(
+         self,
+         model: Model,
+         *,
+         step_name_prefix: str,
+         step_config: StepConfig,
+         event_stream_handler: EventStreamHandler[Any] | None = None,
+     ):
+         super().__init__(model)
+         self.step_config = step_config
+         self.event_stream_handler = event_stream_handler
+         self._step_name_prefix = step_name_prefix
+
+         # Wrap the request in a DBOS step.
+         @DBOS.step(
+             name=f'{self._step_name_prefix}__model.request',
+             **self.step_config,
+         )
+         async def wrapped_request_step(
+             messages: list[ModelMessage],
+             model_settings: ModelSettings | None,
+             model_request_parameters: ModelRequestParameters,
+         ) -> ModelResponse:
+             return await super(DBOSModel, self).request(messages, model_settings, model_request_parameters)
+
+         self._dbos_wrapped_request_step = wrapped_request_step
+
+         # Wrap the request_stream in a DBOS step.
+         @DBOS.step(
+             name=f'{self._step_name_prefix}__model.request_stream',
+             **self.step_config,
+         )
+         async def wrapped_request_stream_step(
+             messages: list[ModelMessage],
+             model_settings: ModelSettings | None,
+             model_request_parameters: ModelRequestParameters,
+             run_context: RunContext[Any] | None = None,
+         ) -> ModelResponse:
+             async with super(DBOSModel, self).request_stream(
+                 messages, model_settings, model_request_parameters, run_context
+             ) as streamed_response:
+                 if self.event_stream_handler is not None:
+                     assert run_context is not None, (
+                         'A DBOS model cannot be used with `pydantic_ai.direct.model_request_stream()` as it requires a `run_context`. Set an `event_stream_handler` on the agent and use `agent.run()` instead.'
+                     )
+                     await self.event_stream_handler(run_context, streamed_response)
+
+                 async for _ in streamed_response:
+                     pass
+             return streamed_response.get()
+
+         self._dbos_wrapped_request_stream_step = wrapped_request_stream_step
+
+     async def request(
+         self,
+         messages: list[ModelMessage],
+         model_settings: ModelSettings | None,
+         model_request_parameters: ModelRequestParameters,
+     ) -> ModelResponse:
+         return await self._dbos_wrapped_request_step(messages, model_settings, model_request_parameters)
+
+     @asynccontextmanager
+     async def request_stream(
+         self,
+         messages: list[ModelMessage],
+         model_settings: ModelSettings | None,
+         model_request_parameters: ModelRequestParameters,
+         run_context: RunContext[Any] | None = None,
+     ) -> AsyncIterator[StreamedResponse]:
+         # If not in a workflow (could be in a step), just call the wrapped request_stream method.
+         if DBOS.workflow_id is None or DBOS.step_id is not None:
+             async with super().request_stream(
+                 messages, model_settings, model_request_parameters, run_context
+             ) as streamed_response:
+                 yield streamed_response
+             return
+
+         response = await self._dbos_wrapped_request_stream_step(
+             messages, model_settings, model_request_parameters, run_context
+         )
+         yield DBOSStreamedResponse(model_request_parameters, response)
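
The net effect of this wrapper: inside a DBOS workflow, a streamed request is consumed to completion within a single step and the recorded ModelResponse is replayed through DBOSStreamedResponse; outside a workflow the wrapped model streams normally. A minimal sketch of wrapping a model by hand, based only on the constructor shown above (the model string, prefix, and retry settings are illustrative):

from pydantic_ai.models import infer_model
from pydantic_ai.durable_exec.dbos._model import DBOSModel
from pydantic_ai.durable_exec.dbos._utils import StepConfig

durable_model = DBOSModel(
    infer_model('openai:gpt-4o'),  # any pydantic-ai Model instance
    step_name_prefix='my_agent',   # per the diff, steps are named 'my_agent__model.request' / '.request_stream'
    step_config=StepConfig(retries_allowed=True, max_attempts=3),
)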
pydantic_ai/durable_exec/dbos/_utils.py ADDED
@@ -0,0 +1,10 @@
+ from typing_extensions import TypedDict
+
+
+ class StepConfig(TypedDict, total=False):
+     """Configuration for a step in the DBOS workflow."""
+
+     retries_allowed: bool
+     interval_seconds: float
+     max_attempts: int
+     backoff_rate: float
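
StepConfig is declared with total=False, so any subset of these keys may be supplied; the wrappers above pass the dict straight through as @DBOS.step(**step_config). A small example with illustrative values (the comments reflect the usual DBOS retry semantics for these options):

from pydantic_ai.durable_exec.dbos._utils import StepConfig

step_config = StepConfig(
    retries_allowed=True,
    max_attempts=5,
    interval_seconds=1.0,  # delay before the first retry
    backoff_rate=2.0,      # multiplier applied to the delay on each subsequent attempt
)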
pydantic_ai/durable_exec/temporal/_agent.py CHANGED
@@ -1,14 +1,16 @@
  from __future__ import annotations
 
- from collections.abc import AsyncIterator, Callable, Iterator, Sequence
+ from collections.abc import AsyncIterable, AsyncIterator, Callable, Iterator, Sequence
  from contextlib import AbstractAsyncContextManager, asynccontextmanager, contextmanager
  from contextvars import ContextVar
+ from dataclasses import dataclass
  from datetime import timedelta
  from typing import Any, Literal, overload
 
+ from pydantic import ConfigDict, with_config
  from pydantic.errors import PydanticUserError
  from pydantic_core import PydanticSerializationError
- from temporalio import workflow
+ from temporalio import activity, workflow
  from temporalio.common import RetryPolicy
  from temporalio.workflow import ActivityConfig
  from typing_extensions import Never
@@ -21,7 +23,6 @@ from pydantic_ai import (
  )
  from pydantic_ai._run_context import AgentDepsT
  from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, RunOutputDataT, WrapperAgent
- from pydantic_ai.durable_exec.temporal._run_context import TemporalRunContext
  from pydantic_ai.exceptions import UserError
  from pydantic_ai.models import Model
  from pydantic_ai.output import OutputDataT, OutputSpec
@@ -29,15 +30,24 @@ from pydantic_ai.result import StreamedRunResult
  from pydantic_ai.settings import ModelSettings
  from pydantic_ai.tools import (
      DeferredToolResults,
+     RunContext,
      Tool,
      ToolFuncEither,
  )
  from pydantic_ai.toolsets import AbstractToolset
 
  from ._model import TemporalModel
+ from ._run_context import TemporalRunContext
  from ._toolset import TemporalWrapperToolset, temporalize_toolset
 
 
+ @dataclass
+ @with_config(ConfigDict(arbitrary_types_allowed=True))
+ class _EventStreamHandlerParams:
+     event: _messages.AgentStreamEvent
+     serialized_run_context: Any
+
+
  class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
      def __init__(
          self,
@@ -86,6 +96,10 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
          """
          super().__init__(wrapped)
 
+         self._name = name
+         self._event_stream_handler = event_stream_handler
+         self.run_context_type = run_context_type
+
          # start_to_close_timeout is required
          activity_config = activity_config or ActivityConfig(start_to_close_timeout=timedelta(seconds=60))
 
@@ -97,13 +111,13 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
              PydanticUserError.__name__,
          ]
          activity_config['retry_policy'] = retry_policy
+         self.activity_config = activity_config
 
          model_activity_config = model_activity_config or {}
          toolset_activity_config = toolset_activity_config or {}
          tool_activity_config = tool_activity_config or {}
 
-         self._name = name or wrapped.name
-         if self._name is None:
+         if self.name is None:
              raise UserError(
                  "An agent needs to have a unique `name` in order to be used with Temporal. The name will be used to identify the agent's activities within the workflow."
              )
@@ -116,13 +130,33 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
                  'An agent needs to have a `model` in order to be used with Temporal, it cannot be set at agent run time.'
              )
 
+         async def event_stream_handler_activity(params: _EventStreamHandlerParams, deps: AgentDepsT) -> None:
+             # We can never get here without an `event_stream_handler`, as `TemporalAgent.run_stream` and `TemporalAgent.iter` raise an error saying to use `TemporalAgent.run` instead,
+             # and that only ends up calling `event_stream_handler` if it is set.
+             assert self.event_stream_handler is not None
+
+             run_context = self.run_context_type.deserialize_run_context(params.serialized_run_context, deps=deps)
+
+             async def streamed_response():
+                 yield params.event
+
+             await self.event_stream_handler(run_context, streamed_response())
+
+         # Set type hint explicitly so that Temporal can take care of serialization and deserialization
+         event_stream_handler_activity.__annotations__['deps'] = self.deps_type
+
+         self.event_stream_handler_activity = activity.defn(name=f'{activity_name_prefix}__event_stream_handler')(
+             event_stream_handler_activity
+         )
+         activities.append(self.event_stream_handler_activity)
+
          temporal_model = TemporalModel(
              wrapped.model,
              activity_name_prefix=activity_name_prefix,
              activity_config=activity_config | model_activity_config,
              deps_type=self.deps_type,
-             run_context_type=run_context_type,
-             event_stream_handler=event_stream_handler or wrapped.event_stream_handler,
+             run_context_type=self.run_context_type,
+             event_stream_handler=self.event_stream_handler,
          )
          activities.extend(temporal_model.temporal_activities)
 
@@ -139,7 +173,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
                  activity_config | toolset_activity_config.get(id, {}),
                  tool_activity_config.get(id, {}),
                  self.deps_type,
-                 run_context_type,
+                 self.run_context_type,
              )
              if isinstance(toolset, TemporalWrapperToolset):
                  activities.extend(toolset.temporal_activities)
@@ -155,7 +189,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
 
      @property
      def name(self) -> str | None:
-         return self._name
+         return self._name or super().name
 
      @name.setter
      def name(self, value: str | None) -> None:  # pragma: no cover
@@ -167,6 +201,33 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
      def model(self) -> Model:
          return self._model
 
+     @property
+     def event_stream_handler(self) -> EventStreamHandler[AgentDepsT] | None:
+         handler = self._event_stream_handler or super().event_stream_handler
+         if handler is None:
+             return None
+         elif workflow.in_workflow():
+             return self._call_event_stream_handler_activity
+         else:
+             return handler
+
+     async def _call_event_stream_handler_activity(
+         self, ctx: RunContext[AgentDepsT], stream: AsyncIterable[_messages.AgentStreamEvent]
+     ) -> None:
+         serialized_run_context = self.run_context_type.serialize_run_context(ctx)
+         async for event in stream:
+             await workflow.execute_activity(  # pyright: ignore[reportUnknownMemberType]
+                 activity=self.event_stream_handler_activity,
+                 args=[
+                     _EventStreamHandlerParams(
+                         event=event,
+                         serialized_run_context=serialized_run_context,
+                     ),
+                     ctx.deps,
+                 ],
+                 **self.activity_config,
+             )
+
      @property
      def toolsets(self) -> Sequence[AbstractToolset[AgentDepsT]]:
          with self._temporal_overrides():
@@ -296,7 +357,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
              usage=usage,
              infer_name=infer_name,
              toolsets=toolsets,
-             event_stream_handler=event_stream_handler,
+             event_stream_handler=event_stream_handler or self.event_stream_handler,
              **_deprecated_kwargs,
          )
 
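
These changes route streamed events through a dedicated '<prefix>__event_stream_handler' Temporal activity: inside a workflow, the event_stream_handler property swaps in _call_event_stream_handler_activity, which serializes the run context and executes the activity once per event. A user-supplied handler keeps the usual signature; a minimal sketch, where the handler body and the agent wiring are illustrative rather than taken from this diff:

from collections.abc import AsyncIterable

from pydantic_ai import Agent
from pydantic_ai.messages import AgentStreamEvent
from pydantic_ai.tools import RunContext


async def handle_events(ctx: RunContext[None], stream: AsyncIterable[AgentStreamEvent]) -> None:
    async for event in stream:
        print(event)  # e.g. push to a queue or websocket in a real application


agent = Agent('openai:gpt-4o', name='my_agent', event_stream_handler=handle_events)
# Wrapping this agent in TemporalAgent registers the activity and, inside a workflow,
# invokes it once per streamed event with the serialized run context and the deps.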
pydantic_ai/exceptions.py CHANGED
@@ -65,7 +65,7 @@ class ModelRetry(Exception):
  class CallDeferred(Exception):
      """Exception to raise when a tool call should be deferred.
 
-     See [tools docs](../tools.md#deferred-tools) for more information.
+     See [tools docs](../deferred-tools.md#deferred-tools) for more information.
      """
 
      pass
@@ -74,7 +74,7 @@ class CallDeferred(Exception):
  class ApprovalRequired(Exception):
      """Exception to raise when a tool call requires human-in-the-loop approval.
 
-     See [tools docs](../tools.md#human-in-the-loop-tool-approval) for more information.
+     See [tools docs](../deferred-tools.md#human-in-the-loop-tool-approval) for more information.
      """
 
      pass
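
Only the documentation links changed here, but for orientation: per their docstrings, both exceptions are raised from inside tool functions. A minimal, hypothetical sketch of the approval case; the guard condition is invented for illustration, and the linked deferred-tools docs describe the actual approval flow:

from pydantic_ai import Agent
from pydantic_ai.exceptions import ApprovalRequired

agent = Agent('openai:gpt-4o')


@agent.tool_plain
def delete_file(path: str) -> str:
    if path.startswith('/'):  # hypothetical guard requiring human sign-off
        raise ApprovalRequired
    return f'deleted {path}'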
pydantic_ai/mcp.py CHANGED
@@ -20,6 +20,7 @@ from typing_extensions import Self, assert_never, deprecated
 
  from pydantic_ai.tools import RunContext, ToolDefinition
 
+ from .direct import model_request
  from .toolsets.abstract import AbstractToolset, ToolsetTool
 
  try:
@@ -300,6 +301,8 @@ class MCPServer(AbstractToolset[Any], ABC):
          return self
 
      async def __aexit__(self, *args: Any) -> bool | None:
+         if self._running_count == 0:
+             raise ValueError('MCPServer.__aexit__ called more times than __aenter__')
          async with self._enter_lock:
              self._running_count -= 1
              if self._running_count == 0 and self._exit_stack is not None:
@@ -327,11 +330,7 @@ class MCPServer(AbstractToolset[Any], ABC):
          if stop_sequences := params.stopSequences:  # pragma: no branch
              model_settings['stop_sequences'] = stop_sequences
 
-         model_response = await self.sampling_model.request(
-             pai_messages,
-             model_settings,
-             models.ModelRequestParameters(),
-         )
+         model_response = await model_request(self.sampling_model, pai_messages, model_settings=model_settings)
          return mcp_types.CreateMessageResult(
              role='assistant',
              content=_mcp.map_from_model_response(model_response),
@@ -401,16 +400,7 @@ class MCPServerStdio(MCPServer):
      from pydantic_ai.mcp import MCPServerStdio
 
      server = MCPServerStdio(  # (1)!
-         'deno',
-         args=[
-             'run',
-             '-N',
-             '-R=node_modules',
-             '-W=node_modules',
-             '--node-modules-dir=auto',
-             'jsr:@pydantic/mcp-run-python',
-             'stdio',
-         ]
+         'uv', args=['run', 'mcp-run-python', 'stdio'], timeout=10
      )
      agent = Agent('openai:gpt-4o', toolsets=[server])
 
@@ -419,7 +409,7 @@
              ...
      ```
 
-     1. See [MCP Run Python](../mcp/run-python.md) for more information.
+     1. See [MCP Run Python](https://github.com/pydantic/mcp-run-python) for more information.
      2. This will start the server as a subprocess and connect to it.
      """
 
@@ -455,6 +445,7 @@
          self,
          command: str,
          args: Sequence[str],
+         *,
          env: dict[str, str] | None = None,
          cwd: str | Path | None = None,
          tool_prefix: str | None = None,
@@ -467,7 +458,6 @@
          sampling_model: models.Model | None = None,
          max_retries: int = 1,
          elicitation_callback: ElicitationFnT | None = None,
-         *,
          id: str | None = None,
      ):
          """Build a new MCP server.
@@ -527,7 +517,7 @@ class MCPServerStdio(MCPServer):
              f'args={self.args!r}',
          ]
          if self.id:
-             repr_args.append(f'id={self.id!r}')  # pragma: no cover
+             repr_args.append(f'id={self.id!r}')
          return f'{self.__class__.__name__}({", ".join(repr_args)})'
 
 
@@ -581,8 +571,8 @@ class _MCPServerHTTP(MCPServer):
 
      def __init__(
          self,
-         *,
          url: str,
+         *,
          headers: dict[str, str] | None = None,
          http_client: httpx.AsyncClient | None = None,
          id: str | None = None,
@@ -732,16 +722,15 @@ class MCPServerSSE(_MCPServerHTTP):
      from pydantic_ai import Agent
      from pydantic_ai.mcp import MCPServerSSE
 
-     server = MCPServerSSE('http://localhost:3001/sse')  # (1)!
+     server = MCPServerSSE('http://localhost:3001/sse')
      agent = Agent('openai:gpt-4o', toolsets=[server])
 
      async def main():
-         async with agent:  # (2)!
+         async with agent:  # (1)!
              ...
      ```
 
-     1. E.g. you might be connecting to a server run with [`mcp-run-python`](../mcp/run-python.md).
-     2. This will connect to a server running on `localhost:3001`.
+     1. This will connect to a server running on `localhost:3001`.
      """
 
      @property
@@ -765,7 +754,7 @@ class MCPServerHTTP(MCPServerSSE):
      from pydantic_ai import Agent
      from pydantic_ai.mcp import MCPServerHTTP
 
-     server = MCPServerHTTP('http://localhost:3001/sse')  # (1)!
+     server = MCPServerHTTP('http://localhost:3001/sse')
      agent = Agent('openai:gpt-4o', toolsets=[server])
 
      async def main():
@@ -773,8 +762,7 @@ class MCPServerHTTP(MCPServerSSE):
              ...
      ```
 
-     1. E.g. you might be connecting to a server run with [`mcp-run-python`](../mcp/run-python.md).
-     2. This will connect to a server running on `localhost:3001`.
+     1. This will connect to a server running on `localhost:3001`.
      """
 
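
The new guard in __aexit__ complements the existing reference counting: nested async with blocks on the same server share one connection, only the outermost exit tears it down, and an unbalanced extra exit now fails loudly instead of driving _running_count negative. A sketch of that behaviour, assuming the mcp-run-python command from the updated docstring is available locally:

import asyncio

from pydantic_ai.mcp import MCPServerStdio

server = MCPServerStdio('uv', args=['run', 'mcp-run-python', 'stdio'])


async def main():
    async with server:      # _running_count 0 -> 1, client session opened
        async with server:  # 1 -> 2, existing session reused
            ...             # list or call tools here
        # 2 -> 1, session stays open
    # 1 -> 0, session closed
    # Calling server.__aexit__() again at this point now raises ValueError.


asyncio.run(main())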