pydantic-ai-slim 0.0.23__py3-none-any.whl → 0.0.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai/agent.py +3 -3
- pydantic_ai/models/__init__.py +17 -12
- pydantic_ai/models/anthropic.py +19 -1
- pydantic_ai/models/cohere.py +10 -0
- pydantic_ai/models/function.py +18 -0
- pydantic_ai/models/gemini.py +22 -1
- pydantic_ai/models/groq.py +19 -1
- pydantic_ai/models/mistral.py +19 -1
- pydantic_ai/models/openai.py +21 -2
- pydantic_ai/models/test.py +18 -1
- pydantic_ai/models/vertexai.py +10 -0
- pydantic_ai/result.py +2 -2
- {pydantic_ai_slim-0.0.23.dist-info → pydantic_ai_slim-0.0.24.dist-info}/METADATA +2 -2
- {pydantic_ai_slim-0.0.23.dist-info → pydantic_ai_slim-0.0.24.dist-info}/RECORD +15 -15
- {pydantic_ai_slim-0.0.23.dist-info → pydantic_ai_slim-0.0.24.dist-info}/WHEEL +0 -0

pydantic_ai/agent.py
CHANGED

@@ -275,7 +275,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         """
         if infer_name and self.name is None:
             self._infer_name(inspect.currentframe())
-        model_used =
+        model_used = self._get_model(model)

         deps = self._get_deps(deps)
         new_message_index = len(message_history) if message_history else 0
@@ -520,7 +520,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         # f_back because `asynccontextmanager` adds one frame
         if frame := inspect.currentframe():  # pragma: no branch
             self._infer_name(frame.f_back)
-        model_used =
+        model_used = self._get_model(model)

         deps = self._get_deps(deps)
         new_message_index = len(message_history) if message_history else 0
@@ -971,7 +971,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):

         self._function_tools[tool.name] = tool

-
+    def _get_model(self, model: models.Model | models.KnownModelName | None) -> models.Model:
         """Create a model configured for this agent.

         Args:
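
The three hunks replace an inline model-resolution expression with a shared `_get_model` helper: each run resolves the per-run `model` argument (a `Model` instance, a `KnownModelName` string, or `None`) and falls back to the agent's default. A minimal usage sketch, assuming the standard `Agent.run_sync` API; the model names are illustrative:

    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o-mini')

    # `model` may be a KnownModelName string or a Model instance; the new
    # `_get_model` helper resolves it, falling back to the agent's default.
    result = agent.run_sync('What is 2 + 2?', model='openai:gpt-4o')
    print(result.data)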

pydantic_ai/models/__init__.py
CHANGED

@@ -54,6 +54,8 @@ KnownModelName = Literal[
     'google-gla:gemini-2.0-flash-exp',
     'google-gla:gemini-2.0-flash-thinking-exp-01-21',
     'google-gla:gemini-exp-1206',
+    'google-gla:gemini-2.0-flash',
+    'google-gla:gemini-2.0-flash-lite-preview-02-05',
     'google-vertex:gemini-1.0-pro',
     'google-vertex:gemini-1.5-flash',
     'google-vertex:gemini-1.5-flash-8b',
@@ -61,6 +63,8 @@ KnownModelName = Literal[
     'google-vertex:gemini-2.0-flash-exp',
     'google-vertex:gemini-2.0-flash-thinking-exp-01-21',
     'google-vertex:gemini-exp-1206',
+    'google-vertex:gemini-2.0-flash',
+    'google-vertex:gemini-2.0-flash-lite-preview-02-05',
     'gpt-3.5-turbo',
     'gpt-3.5-turbo-0125',
     'gpt-3.5-turbo-0301',
@@ -173,9 +177,6 @@ class ModelRequestParameters:
 class Model(ABC):
     """Abstract class for a model."""

-    _model_name: str
-    _system: str | None
-
     @abstractmethod
     async def request(
         self,
@@ -201,24 +202,25 @@ class Model(ABC):
         yield  # pragma: no cover

     @property
+    @abstractmethod
     def model_name(self) -> str:
         """The model name."""
-
+        raise NotImplementedError()

     @property
+    @abstractmethod
     def system(self) -> str | None:
         """The system / model provider, ex: openai."""
-
+        raise NotImplementedError()


 @dataclass
 class StreamedResponse(ABC):
     """Streamed response from an LLM when calling a tool."""

-    _model_name: str
-    _usage: Usage = field(default_factory=Usage, init=False)
     _parts_manager: ModelResponsePartsManager = field(default_factory=ModelResponsePartsManager, init=False)
     _event_iterator: AsyncIterator[ModelResponseStreamEvent] | None = field(default=None, init=False)
+    _usage: Usage = field(default_factory=Usage, init=False)

     def __aiter__(self) -> AsyncIterator[ModelResponseStreamEvent]:
         """Stream the response as an async iterable of [`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s."""
@@ -240,17 +242,20 @@ class StreamedResponse(ABC):
     def get(self) -> ModelResponse:
         """Build a [`ModelResponse`][pydantic_ai.messages.ModelResponse] from the data received from the stream so far."""
         return ModelResponse(
-            parts=self._parts_manager.get_parts(), model_name=self.
+            parts=self._parts_manager.get_parts(), model_name=self.model_name, timestamp=self.timestamp
         )

-    def model_name(self) -> str:
-        """Get the model name of the response."""
-        return self._model_name
-
     def usage(self) -> Usage:
         """Get the usage of the response so far. This will not be the final usage until the stream is exhausted."""
         return self._usage

+    @property
+    @abstractmethod
+    def model_name(self) -> str:
+        """Get the model name of the response."""
+        raise NotImplementedError()
+
+    @property
     @abstractmethod
     def timestamp(self) -> datetime:
         """Get the timestamp of the response."""
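
With `model_name` and `system` now abstract on `Model` (and `model_name` abstract on `StreamedResponse`), custom model implementations must provide them as properties. A minimal sketch of a conforming subclass; `EchoModel` is hypothetical, the `request` body is elided, and its exact signature is assumed from the surrounding hunks:

    from pydantic_ai.models import Model

    class EchoModel(Model):
        """Hypothetical model illustrating the newly abstract properties."""

        def __init__(self, model_name: str = 'echo'):
            self._model_name = model_name
            self._system: str | None = None

        async def request(self, messages, model_settings, model_request_parameters):
            raise NotImplementedError('request body elided in this sketch')

        @property
        def model_name(self) -> str:
            # required as of 0.0.24: `Model.model_name` is abstract
            return self._model_name

        @property
        def system(self) -> str | None:
            # required as of 0.0.24: `Model.system` is abstract
            return self._system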

pydantic_ai/models/anthropic.py
CHANGED

@@ -162,6 +162,16 @@ class AnthropicModel(Model):
         async with response:
             yield await self._process_streamed_response(response)

+    @property
+    def model_name(self) -> AnthropicModelName:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+
     @overload
     async def _messages_create(
         self,
@@ -236,7 +246,7 @@ class AnthropicModel(Model):
             )
         )

-        return ModelResponse(items, model_name=
+        return ModelResponse(items, model_name=response.model)

     async def _process_streamed_response(self, response: AsyncStream[RawMessageStreamEvent]) -> StreamedResponse:
         peekable_response = _utils.PeekableAsyncStream(response)
@@ -362,6 +372,7 @@ def _map_usage(message: AnthropicMessage | RawMessageStreamEvent) -> usage.Usage
 class AnthropicStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for Anthropic models."""

+    _model_name: AnthropicModelName
     _response: AsyncIterable[RawMessageStreamEvent]
     _timestamp: datetime

@@ -414,5 +425,12 @@ class AnthropicStreamedResponse(StreamedResponse):
             elif isinstance(event, (RawContentBlockStopEvent, RawMessageStopEvent)):
                 current_block = None

+    @property
+    def model_name(self) -> AnthropicModelName:
+        """Get the model name of the response."""
+        return self._model_name
+
+    @property
     def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
         return self._timestamp
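
The `_process_response` change records the model name reported by the Anthropic API (`response.model`) on the `ModelResponse` instead of the locally configured name, and `AnthropicStreamedResponse` gains the matching `_model_name` field and property. A sketch of observing this, assuming a configured Anthropic API key; the model string is illustrative:

    from pydantic_ai import Agent

    agent = Agent('anthropic:claude-3-5-haiku-latest')
    result = agent.run_sync('Say hello')

    # responses now carry the API-reported model name (`response.model`)
    for message in result.all_messages():
        print(type(message).__name__, getattr(message, 'model_name', None))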

pydantic_ai/models/cohere.py
CHANGED

@@ -136,6 +136,16 @@ class CohereModel(Model):
         response = await self._chat(messages, cast(CohereModelSettings, model_settings or {}), model_request_parameters)
         return self._process_response(response), _map_usage(response)

+    @property
+    def model_name(self) -> CohereModelName:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+
     async def _chat(
         self,
         messages: list[ModelMessage],

pydantic_ai/models/function.py
CHANGED

@@ -121,6 +121,16 @@ class FunctionModel(Model):

         yield FunctionStreamedResponse(_model_name=f'function:{self.stream_function.__name__}', _iter=response_stream)

+    @property
+    def model_name(self) -> str:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+

 @dataclass(frozen=True)
 class AgentInfo:
@@ -178,6 +188,7 @@ E.g. you need to yield all text or all `DeltaToolCalls`, not mix them.
 class FunctionStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for [FunctionModel][pydantic_ai.models.function.FunctionModel]."""

+    _model_name: str
     _iter: AsyncIterator[str | DeltaToolCalls]
     _timestamp: datetime = field(default_factory=_utils.now_utc)

@@ -205,7 +216,14 @@ class FunctionStreamedResponse(StreamedResponse):
             if maybe_event is not None:
                 yield maybe_event

+    @property
+    def model_name(self) -> str:
+        """Get the model name of the response."""
+        return self._model_name
+
+    @property
     def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
         return self._timestamp

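
`FunctionModel` and `FunctionStreamedResponse` follow the same pattern, with the model name derived from the wrapped function. A sketch assuming the documented `FunctionModel` call signature:

    from pydantic_ai import Agent
    from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart
    from pydantic_ai.models.function import AgentInfo, FunctionModel

    def always_hello(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
        return ModelResponse(parts=[TextPart('hello')])

    agent = Agent(FunctionModel(always_hello))
    print(agent.run_sync('hi').data)
    # the model name is derived from the wrapped function,
    # e.g. 'function:always_hello'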

pydantic_ai/models/gemini.py
CHANGED

@@ -47,6 +47,8 @@ LatestGeminiModelNames = Literal[
     'gemini-2.0-flash-exp',
     'gemini-2.0-flash-thinking-exp-01-21',
     'gemini-exp-1206',
+    'gemini-2.0-flash',
+    'gemini-2.0-flash-lite-preview-02-05',
 ]
 """Latest Gemini models."""

@@ -147,6 +149,16 @@ class GeminiModel(Model):
         ) as http_response:
             yield await self._process_streamed_response(http_response)

+    @property
+    def model_name(self) -> GeminiModelName:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+
     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> _GeminiTools | None:
         tools = [_function_from_abstract_tool(t) for t in model_request_parameters.function_tools]
         if model_request_parameters.result_tools:
@@ -231,7 +243,7 @@ class GeminiModel(Model):
         else:
             raise UnexpectedModelBehavior('Content field missing from Gemini response', str(response))
         parts = response['candidates'][0]['content']['parts']
-        return _process_response_from_parts(parts, model_name=self._model_name)
+        return _process_response_from_parts(parts, model_name=response.get('model_version', self._model_name))

     async def _process_streamed_response(self, http_response: HTTPResponse) -> StreamedResponse:
         """Process a streamed response, and prepare a streaming response to return."""
@@ -313,6 +325,7 @@ class ApiKeyAuth:
 class GeminiStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for the Gemini model."""

+    _model_name: GeminiModelName
     _content: bytearray
     _stream: AsyncIterator[bytes]
     _timestamp: datetime = field(default_factory=_utils.now_utc, init=False)
@@ -376,7 +389,14 @@ class GeminiStreamedResponse(StreamedResponse):
             self._usage += _metadata_as_usage(r)
             yield r

+    @property
+    def model_name(self) -> GeminiModelName:
+        """Get the model name of the response."""
+        return self._model_name
+
+    @property
     def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
         return self._timestamp


@@ -608,6 +628,7 @@ class _GeminiResponse(TypedDict):
     # usageMetadata appears to be required by both APIs but is omitted when streaming responses until the last response
     usage_metadata: NotRequired[Annotated[_GeminiUsageMetaData, pydantic.Field(alias='usageMetadata')]]
     prompt_feedback: NotRequired[Annotated[_GeminiPromptFeedback, pydantic.Field(alias='promptFeedback')]]
+    model_version: NotRequired[Annotated[str, pydantic.Field(alias='modelVersion')]]


 class _GeminiCandidates(TypedDict):
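
Besides the property additions, this registers two new model names and starts trusting the `modelVersion` field of the API response, falling back to the configured name when it is absent. Using the new names, as registered in `KnownModelName` above:

    from pydantic_ai import Agent

    # 'google-gla:' selects the Generative Language API,
    # 'google-vertex:' the Vertex AI variant of the same names
    flash_agent = Agent('google-gla:gemini-2.0-flash')
    lite_agent = Agent('google-gla:gemini-2.0-flash-lite-preview-02-05')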

pydantic_ai/models/groq.py
CHANGED

@@ -146,6 +146,16 @@ class GroqModel(Model):
         async with response:
             yield await self._process_streamed_response(response)

+    @property
+    def model_name(self) -> GroqModelName:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+
     @overload
     async def _completions_create(
         self,
@@ -212,7 +222,7 @@
         if choice.message.tool_calls is not None:
             for c in choice.message.tool_calls:
                 items.append(ToolCallPart(tool_name=c.function.name, args=c.function.arguments, tool_call_id=c.id))
-        return ModelResponse(items, model_name=
+        return ModelResponse(items, model_name=response.model, timestamp=timestamp)

     async def _process_streamed_response(self, response: AsyncStream[ChatCompletionChunk]) -> GroqStreamedResponse:
         """Process a streamed response, and prepare a streaming response to return."""
@@ -305,6 +315,7 @@ class GroqModel(Model):
 class GroqStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for Groq models."""

+    _model_name: GroqModelName
     _response: AsyncIterable[ChatCompletionChunk]
     _timestamp: datetime

@@ -333,7 +344,14 @@ class GroqStreamedResponse(StreamedResponse):
             if maybe_event is not None:
                 yield maybe_event

+    @property
+    def model_name(self) -> GroqModelName:
+        """Get the model name of the response."""
+        return self._model_name
+
+    @property
     def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
         return self._timestamp


pydantic_ai/models/mistral.py
CHANGED

@@ -165,6 +165,16 @@ class MistralModel(Model):
         async with response:
             yield await self._process_streamed_response(model_request_parameters.result_tools, response)

+    @property
+    def model_name(self) -> MistralModelName:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+
     async def _completions_create(
         self,
         messages: list[ModelMessage],
@@ -296,7 +306,7 @@
                 tool = self._map_mistral_to_pydantic_tool_call(tool_call=tool_call)
                 parts.append(tool)

-        return ModelResponse(parts, model_name=
+        return ModelResponse(parts, model_name=response.model, timestamp=timestamp)

     async def _process_streamed_response(
         self,
@@ -461,6 +471,7 @@ MistralToolCallId = Union[str, None]
 class MistralStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for Mistral models."""

+    _model_name: MistralModelName
     _response: AsyncIterable[MistralCompletionEvent]
     _timestamp: datetime
     _result_tools: dict[str, ToolDefinition]
@@ -502,7 +513,14 @@ class MistralStreamedResponse(StreamedResponse):
                     vendor_part_id=index, tool_name=dtc.function.name, args=dtc.function.arguments, tool_call_id=dtc.id
                 )

+    @property
+    def model_name(self) -> MistralModelName:
+        """Get the model name of the response."""
+        return self._model_name
+
+    @property
     def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
         return self._timestamp

     @staticmethod

pydantic_ai/models/openai.py
CHANGED

@@ -122,7 +122,8 @@ class OpenAIModel(Model):
         # openai compatible models do not always need an API key.
         if api_key is None and 'OPENAI_API_KEY' not in os.environ and base_url is not None and openai_client is None:
             api_key = ''
-
+
+        if openai_client is not None:
             assert http_client is None, 'Cannot provide both `openai_client` and `http_client`'
             assert base_url is None, 'Cannot provide both `openai_client` and `base_url`'
             assert api_key is None, 'Cannot provide both `openai_client` and `api_key`'
@@ -163,6 +164,16 @@ class OpenAIModel(Model):
         async with response:
             yield await self._process_streamed_response(response)

+    @property
+    def model_name(self) -> OpenAIModelName:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+
     @overload
     async def _completions_create(
         self,
@@ -232,7 +243,7 @@ class OpenAIModel(Model):
         if choice.message.tool_calls is not None:
             for c in choice.message.tool_calls:
                 items.append(ToolCallPart(c.function.name, c.function.arguments, c.id))
-        return ModelResponse(items, model_name=
+        return ModelResponse(items, model_name=response.model, timestamp=timestamp)

     async def _process_streamed_response(self, response: AsyncStream[ChatCompletionChunk]) -> OpenAIStreamedResponse:
         """Process a streamed response, and prepare a streaming response to return."""
@@ -331,6 +342,7 @@ class OpenAIModel(Model):
 class OpenAIStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for OpenAI models."""

+    _model_name: OpenAIModelName
     _response: AsyncIterable[ChatCompletionChunk]
     _timestamp: datetime

@@ -358,7 +370,14 @@ class OpenAIStreamedResponse(StreamedResponse):
             if maybe_event is not None:
                 yield maybe_event

+    @property
+    def model_name(self) -> OpenAIModelName:
+        """Get the model name of the response."""
+        return self._model_name
+
+    @property
     def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
         return self._timestamp

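
The first hunk only regroups the existing assertions under an explicit `if openai_client is not None:` guard; keyless construction against an OpenAI-compatible server still works as before. A sketch with a hypothetical local endpoint and model name:

    from pydantic_ai.models.openai import OpenAIModel

    # with `base_url` set and no OPENAI_API_KEY in the environment,
    # `api_key` defaults to '' rather than raising
    model = OpenAIModel('llama-3.1-8b', base_url='http://localhost:8000/v1')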

pydantic_ai/models/test.py
CHANGED

@@ -107,6 +107,16 @@ class TestModel(Model):
             _model_name=self._model_name, _structured_response=model_response, _messages=messages
         )

+    @property
+    def model_name(self) -> str:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+
     def gen_tool_args(self, tool_def: ToolDefinition) -> Any:
         return _JsonSchemaTestData(tool_def.parameters_json_schema, self.seed).generate()

@@ -221,9 +231,9 @@
 class TestStreamedResponse(StreamedResponse):
     """A structured response that streams test data."""

+    _model_name: str
     _structured_response: ModelResponse
     _messages: InitVar[Iterable[ModelMessage]]
-
     _timestamp: datetime = field(default_factory=_utils.now_utc, init=False)

     def __post_init__(self, _messages: Iterable[ModelMessage]):
@@ -249,7 +259,14 @@ class TestStreamedResponse(StreamedResponse):
                     vendor_part_id=i, tool_name=part.tool_name, args=part.args, tool_call_id=part.tool_call_id
                 )

+    @property
+    def model_name(self) -> str:
+        """Get the model name of the response."""
+        return self._model_name
+
+    @property
     def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
         return self._timestamp

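
`TestModel` implements the same two properties, so it still plugs in anywhere a `Model` is accepted, e.g. as a per-run override:

    from pydantic_ai import Agent
    from pydantic_ai.models.test import TestModel

    agent = Agent('openai:gpt-4o')

    # TestModel satisfies the newly abstract `model_name`/`system` properties
    result = agent.run_sync('Anything', model=TestModel())
    print(result.data)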

pydantic_ai/models/vertexai.py
CHANGED

@@ -161,6 +161,16 @@ class VertexAIModel(GeminiModel):
         async with super().request_stream(messages, model_settings, model_request_parameters) as value:
             yield value

+    @property
+    def model_name(self) -> GeminiModelName:
+        """The model name."""
+        return self._model_name
+
+    @property
+    def system(self) -> str | None:
+        """The system / model provider."""
+        return self._system
+

 # pyright: reportUnknownMemberType=false
 def _creds_from_file(service_account_file: str | Path) -> ServiceAccountCredentials:

pydantic_ai/result.py
CHANGED

@@ -286,7 +286,7 @@ class StreamedRunResult(_BaseRunResult[ResultDataT], Generic[AgentDepsT, ResultDataT]):
         await self._marked_completed(
             _messages.ModelResponse(
                 parts=[_messages.TextPart(combined_validated_text)],
-                model_name=self._stream_response.model_name
+                model_name=self._stream_response.model_name,
             )
         )

@@ -347,7 +347,7 @@

     def timestamp(self) -> datetime:
         """Get the timestamp of the response."""
-        return self._stream_response.timestamp
+        return self._stream_response.timestamp

     async def validate_structured_result(
         self, message: _messages.ModelResponse, *, allow_partial: bool = False
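
These two call sites now read `model_name` and `timestamp` as properties of the underlying stream response rather than calling methods. A streaming sketch, assuming the `run_stream`/`get_data` API of this release:

    import asyncio

    from pydantic_ai import Agent
    from pydantic_ai.models.test import TestModel

    async def main():
        agent = Agent(TestModel())
        async with agent.run_stream('Hi') as result:
            text = await result.get_data()
            # `timestamp()` forwards to the stream response's `timestamp` property
            print(text, result.timestamp())

    asyncio.run(main())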

{pydantic_ai_slim-0.0.23.dist-info → pydantic_ai_slim-0.0.24.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.23
+Version: 0.0.24
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -28,7 +28,7 @@ Requires-Dist: eval-type-backport>=0.2.0
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: logfire-api>=1.2.0
-Requires-Dist: pydantic-graph==0.0.23
+Requires-Dist: pydantic-graph==0.0.24
 Requires-Dist: pydantic>=2.10
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.40.0; extra == 'anthropic'

{pydantic_ai_slim-0.0.23.dist-info → pydantic_ai_slim-0.0.24.dist-info}/RECORD
CHANGED

@@ -6,25 +6,25 @@ pydantic_ai/_pydantic.py,sha256=dROz3Hmfdi0C2exq88FhefDRVo_8S3rtkXnoUHzsz0c,8753
 pydantic_ai/_result.py,sha256=tN1pVulf_EM4bkBvpNUWPnUXezLY-sBrJEVCFdy2nLU,10264
 pydantic_ai/_system_prompt.py,sha256=602c2jyle2R_SesOrITBDETZqsLk4BZ8Cbo8yEhmx04,1120
 pydantic_ai/_utils.py,sha256=zfuY3NiPPsSM5J1q2JElfbfIa8S1ONGOlC7M-iyBVso,9430
-pydantic_ai/agent.py,sha256=
+pydantic_ai/agent.py,sha256=q9nc_bTUuwxYVrzmUb5NkwOUWQm2XIsxpTuyyAuVdpU,44900
 pydantic_ai/exceptions.py,sha256=eGDKX6bGhgVxXBzu81Sk3iiAkXr0GUtgT7bD5Rxlqpg,2028
 pydantic_ai/format_as_xml.py,sha256=QE7eMlg5-YUMw1_2kcI3h0uKYPZZyGkgXFDtfZTMeeI,4480
 pydantic_ai/messages.py,sha256=kzXn4ZjlX9Sy2KXgFHWYbbwPk7TzTPdztzOJLWEONwU,17101
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pydantic_ai/result.py,sha256=
+pydantic_ai/result.py,sha256=HYTkmb-6fzbxXRlsPOyVic_EexEGN4JLfZfVBKpJtfM,18390
 pydantic_ai/settings.py,sha256=ntuWnke9UA18aByDxk9OIhN0tAgOaPdqCEkRf-wlp8Y,3059
 pydantic_ai/tools.py,sha256=lhupwm815lPlFFS79B0P61AyhUYtepA62LbZOCJrPEY,13205
 pydantic_ai/usage.py,sha256=60d9f6M7YEYuKMbqDGDogX4KsA73fhDtWyDXYXoIPaI,4948
-pydantic_ai/models/__init__.py,sha256=
-pydantic_ai/models/anthropic.py,sha256=
-pydantic_ai/models/cohere.py,sha256=
-pydantic_ai/models/function.py,sha256=
-pydantic_ai/models/gemini.py,sha256=
-pydantic_ai/models/groq.py,sha256=
-pydantic_ai/models/mistral.py,sha256=
-pydantic_ai/models/openai.py,sha256=
-pydantic_ai/models/test.py,sha256=
-pydantic_ai/models/vertexai.py,sha256=
-pydantic_ai_slim-0.0.
-pydantic_ai_slim-0.0.
-pydantic_ai_slim-0.0.
+pydantic_ai/models/__init__.py,sha256=WCvo5bI2dWJbKPhdGWID-HFGrZGo3jvtHhgwaDuY75s,13219
+pydantic_ai/models/anthropic.py,sha256=lArkCv0oZQeG1SqyEHlqKUyrcEYzYrS5fkW_ICKlikU,17749
+pydantic_ai/models/cohere.py,sha256=yLUA3bTQg-4NNjpvD2DXkUIMluia8vETChsjYYN13lg,11016
+pydantic_ai/models/function.py,sha256=5AWAFo-AZ0UyX8wBAYc5nd6U6XpOFvnsYr8vVfatEXQ,10333
+pydantic_ai/models/gemini.py,sha256=CGaHve4wQxkGEa1hArgldvOBk79blGdOqdeLxMH_NQM,30846
+pydantic_ai/models/groq.py,sha256=M7P-9LrH1lm4wyIMiytsHf28bwkukOyVzw3S7F9sbic,14612
+pydantic_ai/models/mistral.py,sha256=1DA1AX-V46p7dI75-4aRwQ8TrQxdh5ZQaoMYZO7VRkI,25868
+pydantic_ai/models/openai.py,sha256=kUV13suTp1ueibH2uYWaRCX832GApImdxSiGqNyfWo4,17161
+pydantic_ai/models/test.py,sha256=6X0r78biqnvakiH1nFKYQFNOs4dWrS-yqe0RFBrKW0s,16873
+pydantic_ai/models/vertexai.py,sha256=9Kp_1KMBlbP8_HRJTuFnrkkFmlJ7yFhADQYjxOgIh9Y,9523
+pydantic_ai_slim-0.0.24.dist-info/METADATA,sha256=kSbEzpXrgMTSHX0ojDw-Mtf8QQFCFE5fIJTRZihLe1w,2839
+pydantic_ai_slim-0.0.24.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.0.24.dist-info/RECORD,,

{pydantic_ai_slim-0.0.23.dist-info → pydantic_ai_slim-0.0.24.dist-info}/WHEEL

File without changes