pydantic-ai-slim 1.0.4__py3-none-any.whl → 1.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydantic-ai-slim might be problematic.
- pydantic_ai/_parts_manager.py +3 -1
- pydantic_ai/builtin_tools.py +18 -9
- pydantic_ai/mcp.py +115 -2
- pydantic_ai/messages.py +3 -0
- pydantic_ai/models/cohere.py +2 -2
- pydantic_ai/models/openai.py +140 -35
- {pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/METADATA +4 -4
- {pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/RECORD +11 -11
- {pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/_parts_manager.py
CHANGED
@@ -71,6 +71,7 @@ class ModelResponsePartsManager:
         *,
         vendor_part_id: VendorId | None,
         content: str,
+        id: str | None = None,
         thinking_tags: tuple[str, str] | None = None,
         ignore_leading_whitespace: bool = False,
     ) -> ModelResponseStreamEvent | None:
@@ -85,6 +86,7 @@ class ModelResponsePartsManager:
                 of text. If None, a new part will be created unless the latest part is already
                 a TextPart.
             content: The text content to append to the appropriate TextPart.
+            id: An optional id for the text part.
             thinking_tags: If provided, will handle content between the thinking tags as thinking parts.
             ignore_leading_whitespace: If True, will ignore leading whitespace in the content.
 
@@ -137,7 +139,7 @@ class ModelResponsePartsManager:
 
             # There is no existing text part that should be updated, so create a new one
             new_part_index = len(self._parts)
-            part = TextPart(content=content)
+            part = TextPart(content=content, id=id)
            if vendor_part_id is not None:
                 self._vendor_id_to_part_index[vendor_part_id] = new_part_index
             self._parts.append(part)
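A minimal usage sketch (not part of the diff) of the new `id` parameter; the vendor part id value is hypothetical, and `get_parts()` is assumed to be the manager's accessor for accumulated parts:

from pydantic_ai._parts_manager import ModelResponsePartsManager

manager = ModelResponsePartsManager()
# The new keyword threads a provider-assigned item id onto the TextPart
# created for this vendor part, so it survives the streaming round trip.
manager.handle_text_delta(
    vendor_part_id='msg_123',  # hypothetical provider item id
    content='Hello',
    id='msg_123',
)
assert manager.get_parts()[0].id == 'msg_123'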
pydantic_ai/builtin_tools.py
CHANGED
@@ -26,8 +26,9 @@ class WebSearchTool(AbstractBuiltinTool):
     The parameters that PydanticAI passes depend on the model, as some parameters may not be supported by certain models.
 
     Supported by:
+
     * Anthropic
-    * OpenAI
+    * OpenAI Responses
     * Groq
     * Google
     """
@@ -36,15 +37,17 @@ class WebSearchTool(AbstractBuiltinTool):
     """The `search_context_size` parameter controls how much context is retrieved from the web to help the tool formulate a response.
 
     Supported by:
-    * OpenAI
+
+    * OpenAI Responses
     """
 
     user_location: WebSearchUserLocation | None = None
     """The `user_location` parameter allows you to localize search results based on a user's location.
 
     Supported by:
+
     * Anthropic
-    * OpenAI
+    * OpenAI Responses
     """
 
     blocked_domains: list[str] | None = None
@@ -53,8 +56,9 @@ class WebSearchTool(AbstractBuiltinTool):
     With Anthropic, you can only use one of `blocked_domains` or `allowed_domains`, not both.
 
     Supported by:
-
-    *
+
+    * Anthropic, see <https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering>
+    * Groq, see <https://console.groq.com/docs/agentic-tooling#search-settings>
     """
 
     allowed_domains: list[str] | None = None
@@ -63,14 +67,16 @@ class WebSearchTool(AbstractBuiltinTool):
     With Anthropic, you can only use one of `blocked_domains` or `allowed_domains`, not both.
 
     Supported by:
-
-    *
+
+    * Anthropic, see <https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering>
+    * Groq, see <https://console.groq.com/docs/agentic-tooling#search-settings>
     """
 
     max_uses: int | None = None
     """If provided, the tool will stop searching the web after the given number of uses.
 
     Supported by:
+
     * Anthropic
     """
 
@@ -79,8 +85,9 @@ class WebSearchUserLocation(TypedDict, total=False):
     """Allows you to localize search results based on a user's location.
 
     Supported by:
+
     * Anthropic
-    * OpenAI
+    * OpenAI Responses
     """
 
     city: str
@@ -100,8 +107,9 @@ class CodeExecutionTool(AbstractBuiltinTool):
     """A builtin tool that allows your agent to execute code.
 
     Supported by:
+
     * Anthropic
-    * OpenAI
+    * OpenAI Responses
     * Google
     """
 
@@ -110,5 +118,6 @@ class UrlContextTool(AbstractBuiltinTool):
     """Allows your agent to access contents from URLs.
 
     Supported by:
+
     * Google
     """
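A hedged usage sketch of the options documented above; the model name and domain list are illustrative and not taken from the diff:

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import WebSearchTool

# max_uses and the domain filters are Anthropic/Groq options per the
# docstrings above; search_context_size and user_location target the
# OpenAI Responses API instead.
agent = Agent(
    'anthropic:claude-sonnet-4-0',
    builtin_tools=[
        WebSearchTool(
            max_uses=3,
            blocked_domains=['example.com'],
        )
    ],
)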
pydantic_ai/mcp.py
CHANGED
@@ -10,12 +10,14 @@ from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontext
 from dataclasses import field, replace
 from datetime import timedelta
 from pathlib import Path
-from typing import Any
+from typing import Annotated, Any
 
 import anyio
 import httpx
 import pydantic_core
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
+from pydantic import BaseModel, Discriminator, Field, Tag
+from pydantic_core import CoreSchema, core_schema
 from typing_extensions import Self, assert_never, deprecated
 
 from pydantic_ai.tools import RunContext, ToolDefinition
@@ -41,7 +43,7 @@ except ImportError as _import_error:
 # after mcp imports so any import error maps to this file, not _mcp.py
 from . import _mcp, _utils, exceptions, messages, models
 
-__all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP', 'MCPServerSSE', 'MCPServerStreamableHTTP'
+__all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP', 'MCPServerSSE', 'MCPServerStreamableHTTP', 'load_mcp_servers'
 
 TOOL_SCHEMA_VALIDATOR = pydantic_core.SchemaValidator(
     schema=pydantic_core.core_schema.dict_schema(
@@ -498,6 +500,22 @@ class MCPServerStdio(MCPServer):
             id=id,
         )
 
+    @classmethod
+    def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
+        return core_schema.no_info_after_validator_function(
+            lambda dct: MCPServerStdio(**dct),
+            core_schema.typed_dict_schema(
+                {
+                    'command': core_schema.typed_dict_field(core_schema.str_schema()),
+                    'args': core_schema.typed_dict_field(core_schema.list_schema(core_schema.str_schema())),
+                    'env': core_schema.typed_dict_field(
+                        core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()),
+                        required=False,
+                    ),
+                }
+            ),
+        )
+
     @asynccontextmanager
     async def client_streams(
         self,
@@ -520,6 +538,16 @@ class MCPServerStdio(MCPServer):
         repr_args.append(f'id={self.id!r}')
         return f'{self.__class__.__name__}({", ".join(repr_args)})'
 
+    def __eq__(self, value: object, /) -> bool:
+        if not isinstance(value, MCPServerStdio):
+            return False  # pragma: no cover
+        return (
+            self.command == value.command
+            and self.args == value.args
+            and self.env == value.env
+            and self.cwd == value.cwd
+        )
+
 
 class _MCPServerHTTP(MCPServer):
     url: str
@@ -733,10 +761,29 @@ class MCPServerSSE(_MCPServerHTTP):
     1. This will connect to a server running on `localhost:3001`.
     """
 
+    @classmethod
+    def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
+        return core_schema.no_info_after_validator_function(
+            lambda dct: MCPServerSSE(**dct),
+            core_schema.typed_dict_schema(
+                {
+                    'url': core_schema.typed_dict_field(core_schema.str_schema()),
+                    'headers': core_schema.typed_dict_field(
+                        core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()), required=False
+                    ),
+                }
+            ),
+        )
+
     @property
     def _transport_client(self):
         return sse_client  # pragma: no cover
 
+    def __eq__(self, value: object, /) -> bool:
+        if not isinstance(value, MCPServerSSE):
+            return False  # pragma: no cover
+        return self.url == value.url
+
 
 @deprecated('The `MCPServerHTTP` class is deprecated, use `MCPServerSSE` instead.')
 class MCPServerHTTP(MCPServerSSE):
@@ -790,10 +837,29 @@ class MCPServerStreamableHTTP(_MCPServerHTTP):
     ```
     """
 
+    @classmethod
+    def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
+        return core_schema.no_info_after_validator_function(
+            lambda dct: MCPServerStreamableHTTP(**dct),
+            core_schema.typed_dict_schema(
+                {
+                    'url': core_schema.typed_dict_field(core_schema.str_schema()),
+                    'headers': core_schema.typed_dict_field(
+                        core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()), required=False
+                    ),
+                }
+            ),
+        )
+
     @property
     def _transport_client(self):
         return streamablehttp_client  # pragma: no cover
 
+    def __eq__(self, value: object, /) -> bool:
+        if not isinstance(value, MCPServerStreamableHTTP):
+            return False  # pragma: no cover
+        return self.url == value.url
+
 
 ToolResult = (
     str
@@ -823,3 +889,50 @@ It accepts a run context, the original tool call function, a tool name, and argu
 Allows wrapping an MCP server tool call to customize it, including adding extra request
 metadata.
 """
+
+
+def _mcp_server_discriminator(value: dict[str, Any]) -> str | None:
+    if 'url' in value:
+        if value['url'].endswith('/sse'):
+            return 'sse'
+        return 'streamable-http'
+    return 'stdio'
+
+
+class MCPServerConfig(BaseModel):
+    """Configuration for MCP servers."""
+
+    mcp_servers: Annotated[
+        dict[
+            str,
+            Annotated[
+                Annotated[MCPServerStdio, Tag('stdio')]
+                | Annotated[MCPServerStreamableHTTP, Tag('streamable-http')]
+                | Annotated[MCPServerSSE, Tag('sse')],
+                Discriminator(_mcp_server_discriminator),
+            ],
+        ],
+        Field(alias='mcpServers'),
+    ]
+
+
+def load_mcp_servers(config_path: str | Path) -> list[MCPServerStdio | MCPServerStreamableHTTP | MCPServerSSE]:
+    """Load MCP servers from a configuration file.
+
+    Args:
+        config_path: The path to the configuration file.
+
+    Returns:
+        A list of MCP servers.
+
+    Raises:
+        FileNotFoundError: If the configuration file does not exist.
+        ValidationError: If the configuration file does not match the schema.
+    """
+    config_path = Path(config_path)
+
+    if not config_path.exists():
+        raise FileNotFoundError(f'Config file {config_path} not found')
+
+    config = MCPServerConfig.model_validate_json(config_path.read_bytes())
+    return list(config.mcp_servers.values())
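A hedged usage sketch of the new `load_mcp_servers` helper; the file name, server entries, and model are illustrative. Per `_mcp_server_discriminator` above, an entry with a `command` key loads as `MCPServerStdio`, a `url` ending in `/sse` as `MCPServerSSE`, and any other `url` as `MCPServerStreamableHTTP`:

# Assumed mcp.json contents, in the 'mcpServers' layout that the
# MCPServerConfig model expects:
#
#   {
#     "mcpServers": {
#       "time": {"command": "uvx", "args": ["mcp-server-time"]},
#       "docs": {"url": "http://localhost:3001/sse"}
#     }
#   }
from pydantic_ai import Agent
from pydantic_ai.mcp import load_mcp_servers

servers = load_mcp_servers('mcp.json')  # raises FileNotFoundError if the path is missing
agent = Agent('openai:gpt-4o', toolsets=servers)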
pydantic_ai/messages.py
CHANGED
pydantic_ai/models/cohere.py
CHANGED
@@ -207,7 +207,7 @@ class CohereModel(Model):
                 if content.type == 'text':
                     parts.append(TextPart(content=content.text))
                 elif content.type == 'thinking':  # pragma: no branch
-                    parts.append(ThinkingPart(content=
+                    parts.append(ThinkingPart(content=content.thinking))
         for c in response.message.tool_calls or []:
             if c.function and c.function.name and c.function.arguments:  # pragma: no branch
                 parts.append(
@@ -258,7 +258,7 @@ class CohereModel(Model):
         if texts or thinking:
             contents: list[AssistantMessageV2ContentItem] = []
             if thinking:
-                contents.append(ThinkingAssistantMessageV2ContentItem(thinking='\n\n'.join(thinking)))
+                contents.append(ThinkingAssistantMessageV2ContentItem(thinking='\n\n'.join(thinking)))
             if texts:  # pragma: no branch
                 contents.append(TextAssistantMessageV2ContentItem(text='\n\n'.join(texts)))
             message_param.content = contents
pydantic_ai/models/openai.py
CHANGED
@@ -190,10 +190,19 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
     This can be useful for debugging and understanding the model's reasoning process.
     One of `concise` or `detailed`.
 
-    Check the [OpenAI
+    Check the [OpenAI Reasoning documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries)
     for more details.
     """
 
+    openai_send_reasoning_ids: bool
+    """Whether to send reasoning IDs from the message history to the model. Enabled by default.
+
+    This can result in errors like `"Item 'rs_123' of type 'reasoning' was provided without its required following item."`
+    if the message history you're sending does not match exactly what was received from the Responses API in a previous response,
+    for example if you're using a [history processor](../../message-history.md#processing-message-history).
+    In that case, you'll want to disable this.
+    """
+
     openai_truncation: Literal['disabled', 'auto']
     """The truncation strategy to use for the model response.
 
@@ -213,6 +222,17 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
     `medium`, and `high`.
     """
 
+    openai_previous_response_id: Literal['auto'] | str
+    """The ID of a previous response from the model to use as the starting point for a continued conversation.
+
+    When set to `'auto'`, the request automatically uses the most recent
+    `provider_response_id` from the message history and omits earlier messages.
+
+    This enables the model to use server-side conversation state and faithfully reference previous reasoning.
+    See the [OpenAI Responses API documentation](https://platform.openai.com/docs/guides/reasoning#keeping-reasoning-items-in-context)
+    for more information.
+    """
+
 
 @dataclass(init=False)
 class OpenAIChatModel(Model):
@@ -859,24 +879,34 @@ class OpenAIResponsesModel(Model):
         for item in response.output:
             if isinstance(item, responses.ResponseReasoningItem):
                 signature = item.encrypted_content
-
-
-
+                if item.summary:
+                    for summary in item.summary:
+                        # We use the same id for all summaries so that we can merge them on the round trip.
+                        items.append(
+                            ThinkingPart(
+                                content=summary.text,
+                                id=item.id,
+                                signature=signature,
+                                provider_name=self.system if signature else None,
+                            )
+                        )
+                        # We only need to store the signature once.
+                        signature = None
+                elif signature:
                     items.append(
                         ThinkingPart(
-                            content=
+                            content='',
                             id=item.id,
                             signature=signature,
-                            provider_name=self.system
+                            provider_name=self.system,
                         )
                     )
-                signature = None
                 # NOTE: We don't currently handle the raw CoT from gpt-oss `reasoning_text`: https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot
                 # If you need this, please file an issue.
             elif isinstance(item, responses.ResponseOutputMessage):
                 for content in item.content:
                     if isinstance(content, responses.ResponseOutputText):  # pragma: no branch
-                        items.append(TextPart(content.text))
+                        items.append(TextPart(content.text, id=item.id))
             elif isinstance(item, responses.ResponseFunctionToolCall):
                 items.append(
                     ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
@@ -958,7 +988,11 @@ class OpenAIResponsesModel(Model):
         else:
             tool_choice = 'auto'
 
-        instructions, openai_messages = await self._map_messages(messages)
+        previous_response_id = model_settings.get('openai_previous_response_id')
+        if previous_response_id == 'auto':
+            previous_response_id, messages = self._get_previous_response_id_and_new_messages(messages)
+
+        instructions, openai_messages = await self._map_messages(messages, model_settings)
         reasoning = self._get_reasoning(model_settings)
 
         text: responses.ResponseTextConfigParam | None = None
@@ -1008,6 +1042,7 @@ class OpenAIResponsesModel(Model):
             truncation=model_settings.get('openai_truncation', NOT_GIVEN),
             timeout=model_settings.get('timeout', NOT_GIVEN),
             service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
+            previous_response_id=previous_response_id,
             reasoning=reasoning,
             user=model_settings.get('openai_user', NOT_GIVEN),
             text=text or NOT_GIVEN,
@@ -1073,8 +1108,30 @@ class OpenAIResponsesModel(Model):
             ),
         }
 
-    async def _map_messages(
+    def _get_previous_response_id_and_new_messages(
         self, messages: list[ModelMessage]
+    ) -> tuple[str | None, list[ModelMessage]]:
+        # When `openai_previous_response_id` is set to 'auto', the most recent
+        # `provider_response_id` from the message history is selected and all
+        # earlier messages are omitted. This allows the OpenAI SDK to reuse
+        # server-side history for efficiency. The returned tuple contains the
+        # `previous_response_id` (if found) and the trimmed list of messages.
+        previous_response_id = None
+        trimmed_messages: list[ModelMessage] = []
+        for m in reversed(messages):
+            if isinstance(m, ModelResponse) and m.provider_name == self.system:
+                previous_response_id = m.provider_response_id
+                break
+            else:
+                trimmed_messages.append(m)
+
+        if previous_response_id and trimmed_messages:
+            return previous_response_id, list(reversed(trimmed_messages))
+        else:
+            return None, messages
+
+    async def _map_messages(  # noqa: C901
+        self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
     ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
         """Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
         openai_messages: list[responses.ResponseInputItemParam] = []
@@ -1112,30 +1169,77 @@ class OpenAIResponsesModel(Model):
                     else:
                         assert_never(part)
             elif isinstance(message, ModelResponse):
+                message_item: responses.ResponseOutputMessageParam | None = None
                 reasoning_item: responses.ResponseReasoningItemParam | None = None
                 for item in message.parts:
                     if isinstance(item, TextPart):
-
+                        if item.id and message.provider_name == self.system:
+                            if message_item is None or message_item['id'] != item.id:  # pragma: no branch
+                                message_item = responses.ResponseOutputMessageParam(
+                                    role='assistant',
+                                    id=item.id or _utils.generate_tool_call_id(),
+                                    content=[],
+                                    type='message',
+                                    status='completed',
+                                )
+                                openai_messages.append(message_item)
+
+                            message_item['content'] = [
+                                *message_item['content'],
+                                responses.ResponseOutputTextParam(
+                                    text=item.content, type='output_text', annotations=[]
+                                ),
+                            ]
+                        else:
+                            openai_messages.append(
+                                responses.EasyInputMessageParam(role='assistant', content=item.content)
+                            )
                     elif isinstance(item, ToolCallPart):
                         openai_messages.append(self._map_tool_call(item))
                     elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart):
                         # We don't currently track built-in tool calls from OpenAI
                         pass
                     elif isinstance(item, ThinkingPart):
-                        if
-
-
-
-
-
-
-
-
-
-
-
-
-
+                        if (
+                            item.id
+                            and message.provider_name == self.system
+                            and model_settings.get('openai_send_reasoning_ids', True)
+                        ):
+                            signature: str | None = None
+                            if (
+                                item.signature
+                                and item.provider_name == self.system
+                                and OpenAIModelProfile.from_profile(
+                                    self.profile
+                                ).openai_supports_encrypted_reasoning_content
+                            ):
+                                signature = item.signature
+
+                            if (reasoning_item is None or reasoning_item['id'] != item.id) and (
+                                signature or item.content
+                            ):  # pragma: no branch
+                                reasoning_item = responses.ResponseReasoningItemParam(
+                                    id=item.id,
+                                    summary=[],
+                                    encrypted_content=signature,
+                                    type='reasoning',
+                                )
+                                openai_messages.append(reasoning_item)
+
+                            if item.content:
+                                # The check above guarantees that `reasoning_item` is not None
+                                assert reasoning_item is not None
+                                reasoning_item['summary'] = [
+                                    *reasoning_item['summary'],
+                                    Summary(text=item.content, type='summary_text'),
+                                ]
+                        else:
+                            start_tag, end_tag = self.profile.thinking_tags
+                            openai_messages.append(
+                                responses.EasyInputMessageParam(
+                                    role='assistant', content='\n'.join([start_tag, item.content, end_tag])
+                                )
+                            )
                     else:
                         assert_never(item)
             else:
@@ -1391,15 +1495,14 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
 
         elif isinstance(chunk, responses.ResponseOutputItemDoneEvent):
             if isinstance(chunk.item, responses.ResponseReasoningItem):
-
-
-
-
-
-
-
-
-                pass
+                if signature := chunk.item.encrypted_content:  # pragma: no branch
+                    # Add the signature to the part corresponding to the first summary item
+                    yield self._parts_manager.handle_thinking_delta(
+                        vendor_part_id=f'{chunk.item.id}-0',
+                        id=chunk.item.id,
+                        signature=signature,
+                        provider_name=self.provider_name,
+                    )
 
         elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
             yield self._parts_manager.handle_thinking_delta(
@@ -1426,7 +1529,9 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 pass  # there's nothing we need to do here
 
             elif isinstance(chunk, responses.ResponseTextDeltaEvent):
-                maybe_event = self._parts_manager.handle_text_delta(
+                maybe_event = self._parts_manager.handle_text_delta(
+                    vendor_part_id=chunk.item_id, content=chunk.delta, id=chunk.item_id
+                )
                 if maybe_event is not None:  # pragma: no branch
                     yield maybe_event
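A hedged sketch combining the two new Responses API settings introduced in this diff; the model name is illustrative and the two keys are independent of each other:

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

settings = OpenAIResponsesModelSettings(
    # Reuse server-side conversation state: only messages after the most
    # recent provider_response_id are sent with the request.
    openai_previous_response_id='auto',
    # Disable when a history processor rewrites messages, so stale
    # reasoning item ids are not sent back to the API.
    openai_send_reasoning_ids=False,
)
agent = Agent(OpenAIResponsesModel('gpt-5'), model_settings=settings)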
{pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.4
+Version: 1.0.6
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.4
+Requires-Dist: pydantic-graph==1.0.6
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -51,13 +51,13 @@ Requires-Dist: prompt-toolkit>=3; extra == 'cli'
 Requires-Dist: pyperclip>=1.9.0; extra == 'cli'
 Requires-Dist: rich>=13; extra == 'cli'
 Provides-Extra: cohere
-Requires-Dist: cohere>=5.
+Requires-Dist: cohere>=5.18.0; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: dbos
 Requires-Dist: dbos>=1.13.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.4; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.6; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/RECORD
CHANGED

@@ -8,19 +8,19 @@ pydantic_ai/_griffe.py,sha256=BphvTL00FHxsSY56GM-bNyCOdwrpL0T3LbDQITWUK_Q,5280
 pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
 pydantic_ai/_otel_messages.py,sha256=qLu81aBDEAsUTW6efBzWRXNDMICTrUUBpcGbCEyXr4o,1480
 pydantic_ai/_output.py,sha256=phJ9AQYUlhQhAVikL0FpPn_Vm05V_yK3VYmCUUtH778,38296
-pydantic_ai/_parts_manager.py,sha256=
+pydantic_ai/_parts_manager.py,sha256=1l6RoyhuiDzbZyHP0asYFm63-nuswrIv1H8O8017qAY,18035
 pydantic_ai/_run_context.py,sha256=AFSTtOBbUAnPpM-V5_b5fLMVAFbEBX4oOdYsGR9ayt4,1824
 pydantic_ai/_system_prompt.py,sha256=WdDW_DTGHujcFFaK-J7J6mA4ZDJZ0IOKpyizJA-1Y5Q,1142
 pydantic_ai/_thinking_part.py,sha256=x80-Vkon16GOyq3W6f2qzafTVPC5dCgF7QD3k8ZMmYU,1304
 pydantic_ai/_tool_manager.py,sha256=hB_QzVxnGEbB7ZT2UUDeKeLm_Cv0F-0oCPwInxR-8NE,10369
 pydantic_ai/_utils.py,sha256=xa2PoAcTN-oXhfXOONOighmue-jtSv668o9Fu_IdO0A,16062
 pydantic_ai/ag_ui.py,sha256=Pp-R6XeHip1oQ6_jqV79JyE4TMQ0VOwb99pHxoGdsuU,28911
-pydantic_ai/builtin_tools.py,sha256=
+pydantic_ai/builtin_tools.py,sha256=l4GLWM54yXa1lqBM-o2WMiTx51nhMZRPS7ufleEn474,3301
 pydantic_ai/direct.py,sha256=zMsz6poVgEq7t7L_8FWM6hmKdqTzjyQYL5xzQt_59Us,14951
 pydantic_ai/exceptions.py,sha256=zsXZMKf2BJuVsfuHl1fWTkogLU37bd4yq7D6BKHAzVs,4968
 pydantic_ai/format_prompt.py,sha256=37imBG2Fgpn-_RfAFalOX8Xc_XpGH2gY9tnhJDvxfk8,4243
-pydantic_ai/mcp.py,sha256=
-pydantic_ai/messages.py,sha256=
+pydantic_ai/mcp.py,sha256=vuDS0BsQ3nL6YX8AvfkUYdpjcvAYIorC4Fz3P1tpR8w,34497
+pydantic_ai/messages.py,sha256=bq9Ps-CsYkXdkq4eu1gmIoiLiYsFTwEzB4fXUF_neic,55865
 pydantic_ai/output.py,sha256=wzNgVKJgxyXtSH-uNbRxIaUNLidxlQcwWYT2o1gY2hE,12037
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=FrJbd0nwaRVIxGH_EhV-ITQvrrd-JaDya9EDsE5-Pps,25389
@@ -55,7 +55,7 @@ pydantic_ai/ext/langchain.py,sha256=iLVEZv1kcLkdIHo3us2yfdi0kVqyJ6qTaCt9BoLWm4k,
 pydantic_ai/models/__init__.py,sha256=na9M98DMJ0VpsYhcJ9WI80EI0278XJEJ9jIE_hlW6q4,36256
 pydantic_ai/models/anthropic.py,sha256=-dH4qYSRlRD1XiC1wR89oGHKnFTjxP8zQh0scQDkTCk,32768
 pydantic_ai/models/bedrock.py,sha256=wHo65QNEsfsb1UaUv_TpvJ0WrgFoKoegB6I3eDVnORI,33393
-pydantic_ai/models/cohere.py,sha256=
+pydantic_ai/models/cohere.py,sha256=uQLynz-zWciZBHuvkm8HxJyTOee1bs3pSka-x-56a98,13668
 pydantic_ai/models/fallback.py,sha256=XJ74wRxVT4dF0uewHH3is9I-zcLBK8KFIhpK3BB6mRw,5526
 pydantic_ai/models/function.py,sha256=9ZuRDQXChiA_S3a_M9tmmYQwlyuUEFZ20aYrnPqdTz8,14599
 pydantic_ai/models/gemini.py,sha256=DYEaOnwGmo9FUGVkRRrydGuQwYhnO-Cq5grTurLWgb4,39376
@@ -65,7 +65,7 @@ pydantic_ai/models/huggingface.py,sha256=f1tZObCJkcbiUCwNoPyuiaRaGYuj0GBFmbA8yFd
 pydantic_ai/models/instrumented.py,sha256=DCnyG7HXgV-W2EWac8oZb2A8PL8yarXeU7Rt97l4w_s,21421
 pydantic_ai/models/mcp_sampling.py,sha256=qnLCO3CB5bNQ86SpWRA-CSSOVcCCLPwjHtcNFvW9wHs,3461
 pydantic_ai/models/mistral.py,sha256=ru8EHwFS0xZBN6s1tlssUdjxjQyjB9L_8kFH7qq5U_g,33654
-pydantic_ai/models/openai.py,sha256=
+pydantic_ai/models/openai.py,sha256=r5hTJIjwxDc9HMEZt44CA8JZ07YFRpgPFYm5VHoH3ak,76377
 pydantic_ai/models/test.py,sha256=1kBwi7pSUt9_K1U-hokOilplxJWPQ3KRKH_s8bYmt_s,19969
 pydantic_ai/models/wrapper.py,sha256=9MeHW7mXPsEK03IKL0rtjeX6QgXyZROOOzLh72GiX2k,2148
 pydantic_ai/profiles/__init__.py,sha256=V6uGAVJuIaYRuZOQjkdIyFfDKD5py18RC98njnHOFug,3293
@@ -120,8 +120,8 @@ pydantic_ai/toolsets/prefixed.py,sha256=0KwcDkW8OM36ZUsOLVP5h-Nj2tPq78L3_E2c-1Fb
 pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
 pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
 pydantic_ai/toolsets/wrapper.py,sha256=KRzF1p8dncHbva8CE6Ud-IC5E_aygIHlwH5atXK55k4,1673
-pydantic_ai_slim-1.0.4.dist-info/METADATA,sha256=
-pydantic_ai_slim-1.0.4.dist-info/WHEEL,sha256=
-pydantic_ai_slim-1.0.4.dist-info/entry_points.txt,sha256=
-pydantic_ai_slim-1.0.4.dist-info/licenses/LICENSE,sha256=
-pydantic_ai_slim-1.0.4.dist-info/RECORD,,
+pydantic_ai_slim-1.0.6.dist-info/METADATA,sha256=86dtIYqk3j4S9L2PhGOJ3n4k-XU5v1YmEuDbyxsV5nE,4627
+pydantic_ai_slim-1.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-1.0.6.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-1.0.6.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-1.0.6.dist-info/RECORD,,
{pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/WHEEL
File without changes

{pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/entry_points.txt
File without changes

{pydantic_ai_slim-1.0.4.dist-info → pydantic_ai_slim-1.0.6.dist-info}/licenses/LICENSE
File without changes