pydantic-ai-slim 1.0.14__py3-none-any.whl → 1.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic; see the registry's advisory page for more details.

Files changed (40)
  1. pydantic_ai/__init__.py +19 -1
  2. pydantic_ai/_agent_graph.py +129 -105
  3. pydantic_ai/_cli.py +7 -10
  4. pydantic_ai/_output.py +236 -192
  5. pydantic_ai/_parts_manager.py +8 -42
  6. pydantic_ai/_tool_manager.py +9 -16
  7. pydantic_ai/agent/__init__.py +18 -7
  8. pydantic_ai/agent/abstract.py +192 -23
  9. pydantic_ai/agent/wrapper.py +7 -4
  10. pydantic_ai/builtin_tools.py +82 -0
  11. pydantic_ai/direct.py +16 -9
  12. pydantic_ai/durable_exec/dbos/_agent.py +124 -18
  13. pydantic_ai/durable_exec/temporal/_agent.py +139 -19
  14. pydantic_ai/durable_exec/temporal/_model.py +8 -0
  15. pydantic_ai/format_prompt.py +9 -6
  16. pydantic_ai/mcp.py +20 -10
  17. pydantic_ai/messages.py +214 -44
  18. pydantic_ai/models/__init__.py +15 -1
  19. pydantic_ai/models/anthropic.py +27 -22
  20. pydantic_ai/models/cohere.py +4 -0
  21. pydantic_ai/models/function.py +7 -4
  22. pydantic_ai/models/gemini.py +8 -0
  23. pydantic_ai/models/google.py +56 -23
  24. pydantic_ai/models/groq.py +11 -5
  25. pydantic_ai/models/huggingface.py +5 -3
  26. pydantic_ai/models/mistral.py +6 -8
  27. pydantic_ai/models/openai.py +206 -58
  28. pydantic_ai/models/test.py +4 -0
  29. pydantic_ai/output.py +5 -2
  30. pydantic_ai/profiles/__init__.py +2 -0
  31. pydantic_ai/profiles/google.py +5 -2
  32. pydantic_ai/profiles/openai.py +2 -1
  33. pydantic_ai/result.py +51 -35
  34. pydantic_ai/run.py +35 -7
  35. pydantic_ai/usage.py +40 -5
  36. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/METADATA +4 -4
  37. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/RECORD +40 -40
  38. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/WHEEL +0 -0
  39. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/entry_points.txt +0 -0
  40. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/licenses/LICENSE +0 -0
@@ -17,6 +17,7 @@ from typing_extensions import Never
17
17
 
18
18
  from pydantic_ai import (
19
19
  AbstractToolset,
20
+ AgentRunResultEvent,
20
21
  _utils,
21
22
  messages as _messages,
22
23
  models,
@@ -258,7 +259,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
258
259
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
259
260
  *,
260
261
  output_type: None = None,
261
- message_history: list[_messages.ModelMessage] | None = None,
262
+ message_history: Sequence[_messages.ModelMessage] | None = None,
262
263
  deferred_tool_results: DeferredToolResults | None = None,
263
264
  model: models.Model | models.KnownModelName | str | None = None,
264
265
  deps: AgentDepsT = None,
@@ -276,7 +277,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
276
277
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
277
278
  *,
278
279
  output_type: OutputSpec[RunOutputDataT],
279
- message_history: list[_messages.ModelMessage] | None = None,
280
+ message_history: Sequence[_messages.ModelMessage] | None = None,
280
281
  deferred_tool_results: DeferredToolResults | None = None,
281
282
  model: models.Model | models.KnownModelName | str | None = None,
282
283
  deps: AgentDepsT = None,
@@ -293,7 +294,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
293
294
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
294
295
  *,
295
296
  output_type: OutputSpec[RunOutputDataT] | None = None,
296
- message_history: list[_messages.ModelMessage] | None = None,
297
+ message_history: Sequence[_messages.ModelMessage] | None = None,
297
298
  deferred_tool_results: DeferredToolResults | None = None,
298
299
  model: models.Model | models.KnownModelName | str | None = None,
299
300
  deps: AgentDepsT = None,
@@ -368,7 +369,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
368
369
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
369
370
  *,
370
371
  output_type: None = None,
371
- message_history: list[_messages.ModelMessage] | None = None,
372
+ message_history: Sequence[_messages.ModelMessage] | None = None,
372
373
  deferred_tool_results: DeferredToolResults | None = None,
373
374
  model: models.Model | models.KnownModelName | str | None = None,
374
375
  deps: AgentDepsT = None,
@@ -386,7 +387,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
386
387
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
387
388
  *,
388
389
  output_type: OutputSpec[RunOutputDataT],
389
- message_history: list[_messages.ModelMessage] | None = None,
390
+ message_history: Sequence[_messages.ModelMessage] | None = None,
390
391
  deferred_tool_results: DeferredToolResults | None = None,
391
392
  model: models.Model | models.KnownModelName | str | None = None,
392
393
  deps: AgentDepsT = None,
@@ -403,7 +404,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
403
404
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
404
405
  *,
405
406
  output_type: OutputSpec[RunOutputDataT] | None = None,
406
- message_history: list[_messages.ModelMessage] | None = None,
407
+ message_history: Sequence[_messages.ModelMessage] | None = None,
407
408
  deferred_tool_results: DeferredToolResults | None = None,
408
409
  model: models.Model | models.KnownModelName | str | None = None,
409
410
  deps: AgentDepsT = None,
@@ -476,7 +477,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
476
477
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
477
478
  *,
478
479
  output_type: None = None,
479
- message_history: list[_messages.ModelMessage] | None = None,
480
+ message_history: Sequence[_messages.ModelMessage] | None = None,
480
481
  deferred_tool_results: DeferredToolResults | None = None,
481
482
  model: models.Model | models.KnownModelName | str | None = None,
482
483
  deps: AgentDepsT = None,
@@ -494,7 +495,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
494
495
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
495
496
  *,
496
497
  output_type: OutputSpec[RunOutputDataT],
497
- message_history: list[_messages.ModelMessage] | None = None,
498
+ message_history: Sequence[_messages.ModelMessage] | None = None,
498
499
  deferred_tool_results: DeferredToolResults | None = None,
499
500
  model: models.Model | models.KnownModelName | str | None = None,
500
501
  deps: AgentDepsT = None,
@@ -512,7 +513,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
512
513
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
513
514
  *,
514
515
  output_type: OutputSpec[RunOutputDataT] | None = None,
515
- message_history: list[_messages.ModelMessage] | None = None,
516
+ message_history: Sequence[_messages.ModelMessage] | None = None,
516
517
  deferred_tool_results: DeferredToolResults | None = None,
517
518
  model: models.Model | models.KnownModelName | str | None = None,
518
519
  deps: AgentDepsT = None,
@@ -558,9 +559,8 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
558
559
  """
559
560
  if workflow.in_workflow():
560
561
  raise UserError(
561
- '`agent.run_stream()` cannot currently be used inside a Temporal workflow. '
562
- 'Set an `event_stream_handler` on the agent and use `agent.run()` instead. '
563
- 'Please file an issue if this is not sufficient for your use case.'
562
+ '`agent.run_stream()` cannot be used inside a Temporal workflow. '
563
+ 'Set an `event_stream_handler` on the agent and use `agent.run()` instead.'
564
564
  )
565
565
 
566
566
  async with super().run_stream(
@@ -580,13 +580,131 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
580
580
  ) as result:
581
581
  yield result
582
582
 
583
+ @overload
584
+ def run_stream_events(
585
+ self,
586
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
587
+ *,
588
+ output_type: None = None,
589
+ message_history: Sequence[_messages.ModelMessage] | None = None,
590
+ deferred_tool_results: DeferredToolResults | None = None,
591
+ model: models.Model | models.KnownModelName | str | None = None,
592
+ deps: AgentDepsT = None,
593
+ model_settings: ModelSettings | None = None,
594
+ usage_limits: _usage.UsageLimits | None = None,
595
+ usage: _usage.RunUsage | None = None,
596
+ infer_name: bool = True,
597
+ toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
598
+ ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[OutputDataT]]: ...
599
+
600
+ @overload
601
+ def run_stream_events(
602
+ self,
603
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
604
+ *,
605
+ output_type: OutputSpec[RunOutputDataT],
606
+ message_history: Sequence[_messages.ModelMessage] | None = None,
607
+ deferred_tool_results: DeferredToolResults | None = None,
608
+ model: models.Model | models.KnownModelName | str | None = None,
609
+ deps: AgentDepsT = None,
610
+ model_settings: ModelSettings | None = None,
611
+ usage_limits: _usage.UsageLimits | None = None,
612
+ usage: _usage.RunUsage | None = None,
613
+ infer_name: bool = True,
614
+ toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
615
+ ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[RunOutputDataT]]: ...
616
+
617
+ def run_stream_events(
618
+ self,
619
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
620
+ *,
621
+ output_type: OutputSpec[RunOutputDataT] | None = None,
622
+ message_history: Sequence[_messages.ModelMessage] | None = None,
623
+ deferred_tool_results: DeferredToolResults | None = None,
624
+ model: models.Model | models.KnownModelName | str | None = None,
625
+ deps: AgentDepsT = None,
626
+ model_settings: ModelSettings | None = None,
627
+ usage_limits: _usage.UsageLimits | None = None,
628
+ usage: _usage.RunUsage | None = None,
629
+ infer_name: bool = True,
630
+ toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
631
+ ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]:
632
+ """Run the agent with a user prompt in async mode and stream events from the run.
633
+
634
+ This is a convenience method that wraps [`self.run`][pydantic_ai.agent.AbstractAgent.run] and
635
+ uses the `event_stream_handler` kwarg to get a stream of events from the run.
636
+
637
+ Example:
638
+ ```python
639
+ from pydantic_ai import Agent, AgentRunResultEvent, AgentStreamEvent
640
+
641
+ agent = Agent('openai:gpt-4o')
642
+
643
+ async def main():
644
+ events: list[AgentStreamEvent | AgentRunResultEvent] = []
645
+ async for event in agent.run_stream_events('What is the capital of France?'):
646
+ events.append(event)
647
+ print(events)
648
+ '''
649
+ [
650
+ PartStartEvent(index=0, part=TextPart(content='The capital of ')),
651
+ FinalResultEvent(tool_name=None, tool_call_id=None),
652
+ PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='France is Paris. ')),
653
+ AgentRunResultEvent(
654
+ result=AgentRunResult(output='The capital of France is Paris. ')
655
+ ),
656
+ ]
657
+ '''
658
+ ```
659
+
660
+ Arguments are the same as for [`self.run`][pydantic_ai.agent.AbstractAgent.run],
661
+ except that `event_stream_handler` is now allowed.
662
+
663
+ Args:
664
+ user_prompt: User input to start/continue the conversation.
665
+ output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
666
+ output validators since output validators would expect an argument that matches the agent's output type.
667
+ message_history: History of the conversation so far.
668
+ deferred_tool_results: Optional results for deferred tool calls in the message history.
669
+ model: Optional model to use for this run, required if `model` was not set when creating the agent.
670
+ deps: Optional dependencies to use for this run.
671
+ model_settings: Optional settings to use for this model's request.
672
+ usage_limits: Optional limits on model request count or token usage.
673
+ usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
674
+ infer_name: Whether to try to infer the agent name from the call frame if it's not set.
675
+ toolsets: Optional additional toolsets for this run.
676
+
677
+ Returns:
678
+ An async iterable of stream events `AgentStreamEvent` and finally a `AgentRunResultEvent` with the final
679
+ run result.
680
+ """
681
+ if workflow.in_workflow():
682
+ raise UserError(
683
+ '`agent.run_stream_events()` cannot be used inside a Temporal workflow. '
684
+ 'Set an `event_stream_handler` on the agent and use `agent.run()` instead.'
685
+ )
686
+
687
+ return super().run_stream_events(
688
+ user_prompt,
689
+ output_type=output_type,
690
+ message_history=message_history,
691
+ deferred_tool_results=deferred_tool_results,
692
+ model=model,
693
+ deps=deps,
694
+ model_settings=model_settings,
695
+ usage_limits=usage_limits,
696
+ usage=usage,
697
+ infer_name=infer_name,
698
+ toolsets=toolsets,
699
+ )
700
+
583
701
  @overload
584
702
  def iter(
585
703
  self,
586
704
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
587
705
  *,
588
706
  output_type: None = None,
589
- message_history: list[_messages.ModelMessage] | None = None,
707
+ message_history: Sequence[_messages.ModelMessage] | None = None,
590
708
  deferred_tool_results: DeferredToolResults | None = None,
591
709
  model: models.Model | models.KnownModelName | str | None = None,
592
710
  deps: AgentDepsT = None,
@@ -604,7 +722,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
604
722
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
605
723
  *,
606
724
  output_type: OutputSpec[RunOutputDataT],
607
- message_history: list[_messages.ModelMessage] | None = None,
725
+ message_history: Sequence[_messages.ModelMessage] | None = None,
608
726
  deferred_tool_results: DeferredToolResults | None = None,
609
727
  model: models.Model | models.KnownModelName | str | None = None,
610
728
  deps: AgentDepsT = None,
@@ -622,7 +740,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
622
740
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
623
741
  *,
624
742
  output_type: OutputSpec[RunOutputDataT] | None = None,
625
- message_history: list[_messages.ModelMessage] | None = None,
743
+ message_history: Sequence[_messages.ModelMessage] | None = None,
626
744
  deferred_tool_results: DeferredToolResults | None = None,
627
745
  model: models.Model | models.KnownModelName | str | None = None,
628
746
  deps: AgentDepsT = None,
@@ -711,9 +829,8 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
711
829
  if workflow.in_workflow():
712
830
  if not self._temporal_overrides_active.get():
713
831
  raise UserError(
714
- '`agent.iter()` cannot currently be used inside a Temporal workflow. '
715
- 'Set an `event_stream_handler` on the agent and use `agent.run()` instead. '
716
- 'Please file an issue if this is not sufficient for your use case.'
832
+ '`agent.iter()` cannot be used inside a Temporal workflow. '
833
+ 'Set an `event_stream_handler` on the agent and use `agent.run()` instead.'
717
834
  )
718
835
 
719
836
  if model is not None:
@@ -745,18 +862,20 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
745
862
  def override(
746
863
  self,
747
864
  *,
865
+ name: str | _utils.Unset = _utils.UNSET,
748
866
  deps: AgentDepsT | _utils.Unset = _utils.UNSET,
749
867
  model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
750
868
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
751
869
  tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
752
870
  instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
753
871
  ) -> Iterator[None]:
754
- """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.
872
+ """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions.
755
873
 
756
874
  This is particularly useful when testing.
757
875
  You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
758
876
 
759
877
  Args:
878
+ name: The name to use instead of the name passed to the agent constructor and agent run.
760
879
  deps: The dependencies to use instead of the dependencies passed to the agent run.
761
880
  model: The model to use instead of the model passed to the agent run.
762
881
  toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
@@ -778,6 +897,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
778
897
  )
779
898
 
780
899
  with super().override(
900
+ name=name,
781
901
  deps=deps,
782
902
  model=model,
783
903
  toolsets=toolsets,
@@ -128,6 +128,8 @@ class TemporalModel(WrapperModel):
128
128
  if not workflow.in_workflow():
129
129
  return await super().request(messages, model_settings, model_request_parameters)
130
130
 
131
+ self._validate_model_request_parameters(model_request_parameters)
132
+
131
133
  return await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType]
132
134
  activity=self.request_activity,
133
135
  arg=_RequestParams(
@@ -163,6 +165,8 @@ class TemporalModel(WrapperModel):
163
165
  # and that only calls `request_stream` if `event_stream_handler` is set.
164
166
  assert self.event_stream_handler is not None
165
167
 
168
+ self._validate_model_request_parameters(model_request_parameters)
169
+
166
170
  serialized_run_context = self.run_context_type.serialize_run_context(run_context)
167
171
  response = await workflow.execute_activity( # pyright: ignore[reportUnknownMemberType]
168
172
  activity=self.request_stream_activity,
@@ -178,3 +182,7 @@ class TemporalModel(WrapperModel):
178
182
  **self.activity_config,
179
183
  )
180
184
  yield TemporalStreamedResponse(model_request_parameters, response)
185
+
186
+ def _validate_model_request_parameters(self, model_request_parameters: ModelRequestParameters) -> None:
187
+ if model_request_parameters.allow_image_output:
188
+ raise UserError('Image output is not supported with Temporal because of the 2MB payload size limit.')
@@ -2,7 +2,8 @@ from __future__ import annotations as _annotations
2
2
 
3
3
  from collections.abc import Iterable, Iterator, Mapping
4
4
  from dataclasses import asdict, dataclass, field, fields, is_dataclass
5
- from datetime import date
5
+ from datetime import date, time, timedelta
6
+ from enum import Enum
6
7
  from typing import Any, Literal
7
8
  from xml.etree import ElementTree
8
9
 
@@ -26,8 +27,8 @@ def format_as_xml(
26
27
  This is useful since LLMs often find it easier to read semi-structured data (e.g. examples) as XML,
27
28
  rather than JSON etc.
28
29
 
29
- Supports: `str`, `bytes`, `bytearray`, `bool`, `int`, `float`, `date`, `datetime`, `Mapping`,
30
- `Iterable`, `dataclass`, and `BaseModel`.
30
+ Supports: `str`, `bytes`, `bytearray`, `bool`, `int`, `float`, `date`, `datetime`, `time`, `timedelta`, `Enum`,
31
+ `Mapping`, `Iterable`, `dataclass`, and `BaseModel`.
31
32
 
32
33
  Args:
33
34
  obj: Python Object to serialize to XML.
@@ -101,10 +102,12 @@ class _ToXml:
101
102
  element.text = value
102
103
  elif isinstance(value, bytes | bytearray):
103
104
  element.text = value.decode(errors='ignore')
104
- elif isinstance(value, bool | int | float):
105
+ elif isinstance(value, bool | int | float | Enum):
105
106
  element.text = str(value)
106
- elif isinstance(value, date):
107
+ elif isinstance(value, date | time):
107
108
  element.text = value.isoformat()
109
+ elif isinstance(value, timedelta):
110
+ element.text = str(value)
108
111
  elif isinstance(value, Mapping):
109
112
  if tag is None and path in self._element_names:
110
113
  element.tag = self._element_names[path]
@@ -164,7 +167,7 @@ class _ToXml:
164
167
  path: str = '',
165
168
  ):
166
169
  """Parse data structures as dataclasses or Pydantic models to extract element names and attributes."""
167
- if value is None or isinstance(value, (str | int | float | date | bytearray | bytes | bool)):
170
+ if value is None or isinstance(value, (str | int | float | date | time | timedelta | bytearray | bytes | bool)):
168
171
  return
169
172
  elif isinstance(value, Mapping):
170
173
  for k, v in value.items(): # pyright: ignore[reportUnknownVariableType]
pydantic_ai/mcp.py CHANGED
@@ -167,6 +167,10 @@ class MCPServer(AbstractToolset[Any], ABC):
167
167
  def id(self) -> str | None:
168
168
  return self._id
169
169
 
170
+ @id.setter
171
+ def id(self, value: str | None):
172
+ self._id = value
173
+
170
174
  @property
171
175
  def label(self) -> str:
172
176
  if self.id:
@@ -414,6 +418,9 @@ class MCPServer(AbstractToolset[Any], ABC):
414
418
  else:
415
419
  assert_never(resource)
416
420
 
421
+ def __eq__(self, value: object, /) -> bool:
422
+ return isinstance(value, MCPServer) and self.id == value.id and self.tool_prefix == value.tool_prefix
423
+
417
424
 
418
425
  class MCPServerStdio(MCPServer):
419
426
  """Runs an MCP server in a subprocess and communicates with it over stdin/stdout.
@@ -568,10 +575,10 @@ class MCPServerStdio(MCPServer):
568
575
  return f'{self.__class__.__name__}({", ".join(repr_args)})'
569
576
 
570
577
  def __eq__(self, value: object, /) -> bool:
571
- if not isinstance(value, MCPServerStdio):
572
- return False # pragma: no cover
573
578
  return (
574
- self.command == value.command
579
+ super().__eq__(value)
580
+ and isinstance(value, MCPServerStdio)
581
+ and self.command == value.command
575
582
  and self.args == value.args
576
583
  and self.env == value.env
577
584
  and self.cwd == value.cwd
@@ -809,9 +816,7 @@ class MCPServerSSE(_MCPServerHTTP):
809
816
  return sse_client # pragma: no cover
810
817
 
811
818
  def __eq__(self, value: object, /) -> bool:
812
- if not isinstance(value, MCPServerSSE):
813
- return False # pragma: no cover
814
- return self.url == value.url
819
+ return super().__eq__(value) and isinstance(value, MCPServerSSE) and self.url == value.url
815
820
 
816
821
 
817
822
  @deprecated('The `MCPServerHTTP` class is deprecated, use `MCPServerSSE` instead.')
@@ -885,9 +890,7 @@ class MCPServerStreamableHTTP(_MCPServerHTTP):
885
890
  return streamablehttp_client # pragma: no cover
886
891
 
887
892
  def __eq__(self, value: object, /) -> bool:
888
- if not isinstance(value, MCPServerStreamableHTTP):
889
- return False # pragma: no cover
890
- return self.url == value.url
893
+ return super().__eq__(value) and isinstance(value, MCPServerStreamableHTTP) and self.url == value.url
891
894
 
892
895
 
893
896
  ToolResult = (
@@ -964,4 +967,11 @@ def load_mcp_servers(config_path: str | Path) -> list[MCPServerStdio | MCPServer
964
967
  raise FileNotFoundError(f'Config file {config_path} not found')
965
968
 
966
969
  config = MCPServerConfig.model_validate_json(config_path.read_bytes())
967
- return list(config.mcp_servers.values())
970
+
971
+ servers: list[MCPServerStdio | MCPServerStreamableHTTP | MCPServerSSE] = []
972
+ for name, server in config.mcp_servers.items():
973
+ server.id = name
974
+ server.tool_prefix = name
975
+ servers.append(server)
976
+
977
+ return servers