pydantic-ai-slim 0.0.17__py3-none-any.whl → 0.0.19__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.


pydantic_ai/result.py CHANGED
@@ -1,7 +1,7 @@
  from __future__ import annotations as _annotations

  from abc import ABC, abstractmethod
- from collections.abc import AsyncIterator, Awaitable, Callable
+ from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable
  from copy import deepcopy
  from dataclasses import dataclass, field
  from datetime import datetime
@@ -169,7 +169,7 @@ class StreamedRunResult(_BaseRunResult[ResultData], Generic[AgentDeps, ResultDat
      """Result of a streamed run that returns structured data via a tool call."""

      _usage_limits: UsageLimits | None
-     _stream_response: models.EitherStreamedResponse
+     _stream_response: models.StreamedResponse
      _result_schema: _result.ResultSchema[ResultData] | None
      _run_ctx: RunContext[AgentDeps]
      _result_validators: list[_result.ResultValidator[AgentDeps, ResultData]]
@@ -200,20 +200,13 @@ class StreamedRunResult(_BaseRunResult[ResultData], Generic[AgentDeps, ResultDat
          Returns:
              An async iterable of the response data.
          """
-         if isinstance(self._stream_response, models.StreamTextResponse):
-             async for text in self.stream_text(debounce_by=debounce_by):
-                 yield cast(ResultData, text)
-         else:
-             async for structured_message, is_last in self.stream_structured(debounce_by=debounce_by):
-                 yield await self.validate_structured_result(structured_message, allow_partial=not is_last)
+         async for structured_message, is_last in self.stream_structured(debounce_by=debounce_by):
+             result = await self.validate_structured_result(structured_message, allow_partial=not is_last)
+             yield result

      async def stream_text(self, *, delta: bool = False, debounce_by: float | None = 0.1) -> AsyncIterator[str]:
          """Stream the text result as an async iterable.

-         !!! note
-             This method will fail if the response is structured,
-             e.g. if [`is_structured`][pydantic_ai.result.StreamedRunResult.is_structured] returns `True`.
-
          !!! note
              Result validators will NOT be called on the text result if `delta=True`.

@@ -224,54 +217,65 @@ class StreamedRunResult(_BaseRunResult[ResultData], Generic[AgentDeps, ResultDat
              Debouncing is particularly important for long structured responses to reduce the overhead of
              performing validation as each token is received.
          """
+         if self._result_schema and not self._result_schema.allow_text_result:
+             raise exceptions.UserError('stream_text() can only be used with text responses')
+
          usage_checking_stream = _get_usage_checking_stream_response(
              self._stream_response, self._usage_limits, self.usage
          )

+         # Define a "merged" version of the iterator that will yield items that have already been retrieved
+         # and items that we receive while streaming. We define a dedicated async iterator for this so we can
+         # pass the combined stream to the group_by_temporal function within `_stream_text_deltas` below.
+         async def _stream_text_deltas_ungrouped() -> AsyncIterator[tuple[str, int]]:
+             # if the response currently has any parts with content, yield those before streaming
+             msg = self._stream_response.get()
+             for i, part in enumerate(msg.parts):
+                 if isinstance(part, _messages.TextPart) and part.content:
+                     yield part.content, i
+
+             async for event in usage_checking_stream:
+                 if (
+                     isinstance(event, _messages.PartStartEvent)
+                     and isinstance(event.part, _messages.TextPart)
+                     and event.part.content
+                 ):
+                     yield event.part.content, event.index
+                 elif (
+                     isinstance(event, _messages.PartDeltaEvent)
+                     and isinstance(event.delta, _messages.TextPartDelta)
+                     and event.delta.content_delta
+                 ):
+                     yield event.delta.content_delta, event.index
+
+         async def _stream_text_deltas() -> AsyncIterator[str]:
+             async with _utils.group_by_temporal(_stream_text_deltas_ungrouped(), debounce_by) as group_iter:
+                 async for items in group_iter:
+                     yield ''.join([content for content, _ in items])
+
          with _logfire.span('response stream text') as lf_span:
-             if isinstance(self._stream_response, models.StreamStructuredResponse):
-                 raise exceptions.UserError('stream_text() can only be used with text responses')
              if delta:
-                 async with _utils.group_by_temporal(usage_checking_stream, debounce_by) as group_iter:
-                     async for _ in group_iter:
-                         yield ''.join(self._stream_response.get())
-                 final_delta = ''.join(self._stream_response.get(final=True))
-                 if final_delta:
-                     yield final_delta
+                 async for text in _stream_text_deltas():
+                     yield text
              else:
                  # a quick benchmark shows it's faster to build up a string with concat when we're
                  # yielding at each step
-                 chunks: list[str] = []
-                 combined = ''
-                 async with _utils.group_by_temporal(usage_checking_stream, debounce_by) as group_iter:
-                     async for _ in group_iter:
-                         new = False
-                         for chunk in self._stream_response.get():
-                             chunks.append(chunk)
-                             new = True
-                         if new:
-                             combined = await self._validate_text_result(''.join(chunks))
-                             yield combined
-
-                 new = False
-                 for chunk in self._stream_response.get(final=True):
-                     chunks.append(chunk)
-                     new = True
-                 if new:
-                     combined = await self._validate_text_result(''.join(chunks))
-                     yield combined
-                 lf_span.set_attribute('combined_text', combined)
-                 await self._marked_completed(_messages.ModelResponse.from_text(combined))
+                 deltas: list[str] = []
+                 combined_validated_text = ''
+                 async for text in _stream_text_deltas():
+                     deltas.append(text)
+                     combined_text = ''.join(deltas)
+                     combined_validated_text = await self._validate_text_result(combined_text)
+                     yield combined_validated_text
+
+                 lf_span.set_attribute('combined_text', combined_validated_text)
+                 await self._marked_completed(_messages.ModelResponse.from_text(combined_validated_text))

      async def stream_structured(
          self, *, debounce_by: float | None = 0.1
      ) -> AsyncIterator[tuple[_messages.ModelResponse, bool]]:
          """Stream the response as an async iterable of Structured LLM Messages.

-         !!! note
-             This method will fail if the response is text,
-             e.g. if [`is_structured`][pydantic_ai.result.StreamedRunResult.is_structured] returns `False`.
-
          Args:
              debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
                  Debouncing is particularly important for long structured responses to reduce the overhead of
@@ -285,24 +289,20 @@ class StreamedRunResult(_BaseRunResult[ResultData], Generic[AgentDeps, ResultDat
          )

          with _logfire.span('response stream structured') as lf_span:
-             if isinstance(self._stream_response, models.StreamTextResponse):
-                 raise exceptions.UserError('stream_structured() can only be used with structured responses')
-             else:
-                 # we should already have a message at this point, yield that first if it has any content
+             # if the message currently has any parts with content, yield before streaming
+             msg = self._stream_response.get()
+             for part in msg.parts:
+                 if part.has_content():
+                     yield msg, False
+                     break
+
+             async with _utils.group_by_temporal(usage_checking_stream, debounce_by) as group_iter:
+                 async for _events in group_iter:
+                     msg = self._stream_response.get()
+                     yield msg, False
              msg = self._stream_response.get()
-                 for item in msg.parts:
-                     if isinstance(item, _messages.ToolCallPart) and item.has_content():
-                         yield msg, False
-                         break
-                 async with _utils.group_by_temporal(usage_checking_stream, debounce_by) as group_iter:
-                     async for _ in group_iter:
-                         msg = self._stream_response.get()
-                         for item in msg.parts:
-                             if isinstance(item, _messages.ToolCallPart) and item.has_content():
-                                 yield msg, False
-                                 break
-                 msg = self._stream_response.get(final=True)
              yield msg, True
+             # TODO: Should this now be `final_response` instead of `structured_response`?
              lf_span.set_attribute('structured_response', msg)
              await self._marked_completed(msg)

@@ -314,21 +314,9 @@ class StreamedRunResult(_BaseRunResult[ResultData], Generic[AgentDeps, ResultDat

          async for _ in usage_checking_stream:
              pass
-
-         if isinstance(self._stream_response, models.StreamTextResponse):
-             text = ''.join(self._stream_response.get(final=True))
-             text = await self._validate_text_result(text)
-             await self._marked_completed(_messages.ModelResponse.from_text(text))
-             return cast(ResultData, text)
-         else:
-             message = self._stream_response.get(final=True)
-             await self._marked_completed(message)
-             return await self.validate_structured_result(message)
-
-     @property
-     def is_structured(self) -> bool:
-         """Return whether the stream response contains structured data (as opposed to text)."""
-         return isinstance(self._stream_response, models.StreamStructuredResponse)
+         message = self._stream_response.get()
+         await self._marked_completed(message)
+         return await self.validate_structured_result(message)

      def usage(self) -> Usage:
          """Return the usage of the whole run.
@@ -346,20 +334,29 @@ class StreamedRunResult(_BaseRunResult[ResultData], Generic[AgentDeps, ResultDat
          self, message: _messages.ModelResponse, *, allow_partial: bool = False
      ) -> ResultData:
          """Validate a structured result message."""
-         assert self._result_schema is not None, 'Expected _result_schema to not be None'
-         assert self._result_tool_name is not None, 'Expected _result_tool_name to not be None'
-         match = self._result_schema.find_named_tool(message.parts, self._result_tool_name)
-         if match is None:
-             raise exceptions.UnexpectedModelBehavior(
-                 f'Invalid message, unable to find tool: {self._result_schema.tool_names()}'
-             )
-
-         call, result_tool = match
-         result_data = result_tool.validate(call, allow_partial=allow_partial, wrap_validation_errors=False)
-
-         for validator in self._result_validators:
-             result_data = await validator.validate(result_data, call, self._run_ctx)
-         return result_data
+         if self._result_schema is not None and self._result_tool_name is not None:
+             match = self._result_schema.find_named_tool(message.parts, self._result_tool_name)
+             if match is None:
+                 raise exceptions.UnexpectedModelBehavior(
+                     f'Invalid response, unable to find tool: {self._result_schema.tool_names()}'
+                 )
+
+             call, result_tool = match
+             result_data = result_tool.validate(call, allow_partial=allow_partial, wrap_validation_errors=False)
+
+             for validator in self._result_validators:
+                 result_data = await validator.validate(result_data, call, self._run_ctx)
+             return result_data
+         else:
+             text = '\n\n'.join(x.content for x in message.parts if isinstance(x, _messages.TextPart))
+             for validator in self._result_validators:
+                 text = await validator.validate(
+                     text,  # pyright: ignore[reportArgumentType]
+                     None,
+                     self._run_ctx,
+                 )
+             # Since there is no result tool, we can assume that str is compatible with ResultData
+             return cast(ResultData, text)

      async def _validate_text_result(self, text: str) -> str:
          for validator in self._result_validators:
@@ -377,8 +374,10 @@ class StreamedRunResult(_BaseRunResult[ResultData], Generic[AgentDeps, ResultDat


  def _get_usage_checking_stream_response(
-     stream_response: AsyncIterator[ResultData], limits: UsageLimits | None, get_usage: Callable[[], Usage]
- ) -> AsyncIterator[ResultData]:
+     stream_response: AsyncIterable[_messages.ModelResponseStreamEvent],
+     limits: UsageLimits | None,
+     get_usage: Callable[[], Usage],
+ ) -> AsyncIterable[_messages.ModelResponseStreamEvent]:
      if limits is not None and limits.has_token_limits():

          async def _usage_checking_iterator():
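
The net effect of the `result.py` changes: `StreamedRunResult` now wraps a single `models.StreamedResponse` (the old `StreamTextResponse`/`StreamStructuredResponse` split and the `is_structured` property are gone), `stream_text()` rejects structured-only result schemas up front with a `UserError`, and text is accumulated from `TextPart`/`TextPartDelta` events. Below is a minimal sketch of driving the streaming API against the new version; the agent setup, prompt, and `openai:gpt-4o` model name are illustrative assumptions, not taken from this diff.

```python
import asyncio

from pydantic_ai import Agent

# Illustrative sketch: assumes a plain-text result type (so stream_text() is allowed)
# and that OpenAI credentials are configured in the environment.
agent = Agent('openai:gpt-4o')


async def main() -> None:
    async with agent.run_stream('Tell me a short joke.') as result:
        # By default stream_text() yields the full validated text accumulated so far,
        # debounced into groups; pass delta=True to receive only the new chunks.
        async for text in result.stream_text(debounce_by=0.1):
            print(text)


asyncio.run(main())
```
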
pydantic_ai/tools.py CHANGED
@@ -4,7 +4,7 @@ import dataclasses
  import inspect
  from collections.abc import Awaitable
  from dataclasses import dataclass, field
- from typing import TYPE_CHECKING, Any, Callable, Generic, Union, cast
+ from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, Union, cast

  from pydantic import ValidationError
  from pydantic_core import SchemaValidator
@@ -18,6 +18,7 @@ if TYPE_CHECKING:

  __all__ = (
      'AgentDeps',
+     'DocstringFormat',
      'RunContext',
      'SystemPromptFunc',
      'ToolFuncContext',
@@ -106,7 +107,7 @@ See [tool docs](../tools.md#tool-prepare) for more information.

  Example — here `only_if_42` is valid as a `ToolPrepareFunc`:

- ```python {lint="not-imports"}
+ ```python {noqa="I001"}
  from typing import Union

  from pydantic_ai import RunContext, Tool
@@ -127,6 +128,15 @@ hitchhiker = Tool(hitchhiker, prepare=only_if_42)
  Usage `ToolPrepareFunc[AgentDeps]`.
  """

+ DocstringFormat = Literal['google', 'numpy', 'sphinx', 'auto']
+ """Supported docstring formats.
+
+ * `'google'` — [Google-style](https://google.github.io/styleguide/pyguide.html#381-docstrings) docstrings.
+ * `'numpy'` — [Numpy-style](https://numpydoc.readthedocs.io/en/latest/format.html) docstrings.
+ * `'sphinx'` — [Sphinx-style](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html#the-sphinx-docstring-format) docstrings.
+ * `'auto'` — Automatically infer the format based on the structure of the docstring.
+ """
+
  A = TypeVar('A')


@@ -140,6 +150,8 @@ class Tool(Generic[AgentDeps]):
      name: str
      description: str
      prepare: ToolPrepareFunc[AgentDeps] | None
+     docstring_format: DocstringFormat
+     require_parameter_descriptions: bool
      _is_async: bool = field(init=False)
      _single_arg_name: str | None = field(init=False)
      _positional_fields: list[str] = field(init=False)
@@ -157,12 +169,14 @@ class Tool(Generic[AgentDeps]):
          name: str | None = None,
          description: str | None = None,
          prepare: ToolPrepareFunc[AgentDeps] | None = None,
+         docstring_format: DocstringFormat = 'auto',
+         require_parameter_descriptions: bool = False,
      ):
          """Create a new tool instance.

          Example usage:

-         ```python {lint="not-imports"}
+         ```python {noqa="I001"}
          from pydantic_ai import Agent, RunContext, Tool

          async def my_tool(ctx: RunContext[int], x: int, y: int) -> str:
@@ -173,7 +187,7 @@ class Tool(Generic[AgentDeps]):

          or with a custom prepare method:

-         ```python {lint="not-imports"}
+         ```python {noqa="I001"}
          from typing import Union

          from pydantic_ai import Agent, RunContext, Tool
@@ -203,17 +217,22 @@ class Tool(Generic[AgentDeps]):
              prepare: custom method to prepare the tool definition for each step, return `None` to omit this
                  tool from a given step. This is useful if you want to customise a tool at call time,
                  or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
+             docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
+                 Defaults to `'auto'`, such that the format is inferred from the structure of the docstring.
+             require_parameter_descriptions: If True, raise an error if a parameter description is missing. Defaults to False.
          """
          if takes_ctx is None:
              takes_ctx = _pydantic.takes_ctx(function)

-         f = _pydantic.function_schema(function, takes_ctx)
+         f = _pydantic.function_schema(function, takes_ctx, docstring_format, require_parameter_descriptions)
          self.function = function
          self.takes_ctx = takes_ctx
          self.max_retries = max_retries
          self.name = name or function.__name__
          self.description = description or f['description']
          self.prepare = prepare
+         self.docstring_format = docstring_format
+         self.require_parameter_descriptions = require_parameter_descriptions
          self._is_async = inspect.iscoroutinefunction(self.function)
          self._single_arg_name = f['single_arg_name']
          self._positional_fields = f['positional_fields']
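
The new `Tool` parameters can be exercised as in the following sketch of registering a tool with an explicit docstring format and mandatory parameter descriptions. The `greet` function, its numpy-style docstring, and the `'test'` model are illustrative assumptions rather than part of this release.

```python
from pydantic_ai import Agent, Tool


def greet(name: str, excited: bool = False) -> str:
    """Build a greeting.

    Parameters
    ----------
    name : str
        Name of the person to greet.
    excited : bool
        Whether to end with an exclamation mark.
    """
    suffix = '!' if excited else '.'
    return f'Hello, {name}{suffix}'


# Illustrative sketch: docstring_format='numpy' skips format auto-detection, and
# require_parameter_descriptions=True makes schema extraction fail loudly if any
# parameter lacks a description in the docstring.
greet_tool = Tool(greet, docstring_format='numpy', require_parameter_descriptions=True)
agent = Agent('test', tools=[greet_tool])
```
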
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-slim
- Version: 0.0.17
+ Version: 0.0.19
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
  Author-email: Samuel Colvin <samuel@pydantic.dev>
  License-Expression: MIT
@@ -31,6 +31,8 @@ Requires-Dist: logfire-api>=1.2.0
  Requires-Dist: pydantic>=2.10
  Provides-Extra: anthropic
  Requires-Dist: anthropic>=0.40.0; extra == 'anthropic'
+ Provides-Extra: graph
+ Requires-Dist: pydantic-graph==0.0.19; extra == 'graph'
  Provides-Extra: groq
  Requires-Dist: groq>=0.12.0; extra == 'groq'
  Provides-Extra: logfire
@@ -0,0 +1,29 @@
+ pydantic_ai/__init__.py,sha256=FbYetEgT6OO25u2KF5ZnFxKpz5DtnSpfckRXP4mjl8E,489
+ pydantic_ai/_griffe.py,sha256=RYRKiLbgG97QxnazbAwlnc74XxevGHLQet-FGfq9qls,3960
+ pydantic_ai/_parts_manager.py,sha256=pMDZs6BGC8EmaNa-73QvuptmxdG2MhBrBLIydCOl-gM,11886
+ pydantic_ai/_pydantic.py,sha256=Zvjd2te6EzPrFnz--oDSdqwZuPw3vCiflTHriRhpNsY,8698
+ pydantic_ai/_result.py,sha256=cUSugZQV0n5Z4fFHiMqua-2xs_0S6m-rr-yd6QS3nFE,10317
+ pydantic_ai/_system_prompt.py,sha256=Fsl1K6GdQP0WhWBzvJxCc5uTqCD06lHjJlTADah-PI0,1116
+ pydantic_ai/_utils.py,sha256=EHW866W6ZpGJLCWtoEAcwIPeWo9OQFhnD5el2DwVcwc,10949
+ pydantic_ai/agent.py,sha256=Z_79gw4BIJooBIqJwPbnDHvmBcCXp2dbNd_832tc_do,62500
+ pydantic_ai/exceptions.py,sha256=eGDKX6bGhgVxXBzu81Sk3iiAkXr0GUtgT7bD5Rxlqpg,2028
+ pydantic_ai/format_as_xml.py,sha256=QE7eMlg5-YUMw1_2kcI3h0uKYPZZyGkgXFDtfZTMeeI,4480
+ pydantic_ai/messages.py,sha256=b4RpaXogREquE8WHlGPMm0UGTNx2QtePV5GYk-9EscY,18185
+ pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydantic_ai/result.py,sha256=93ZLxr2jPx0cZeslHgphJ6XJnQMEybktzQ_LUT47h3Q,17429
+ pydantic_ai/settings.py,sha256=oTk8ZfYuUsNxpJMWLvSrO1OH_0ur7VKgDNTMQG0tPyM,1974
+ pydantic_ai/tools.py,sha256=iwa2PyhnKmvh_njy4aMfRIh7AP5igDIZ1ZPvgvvn6bM,13018
+ pydantic_ai/usage.py,sha256=60d9f6M7YEYuKMbqDGDogX4KsA73fhDtWyDXYXoIPaI,4948
+ pydantic_ai/models/__init__.py,sha256=Q4_fHy48szaA_TrIW3LZhRXiDUlhPAYf8LkhinSP3s8,10883
+ pydantic_ai/models/anthropic.py,sha256=MkFqy2F7SPb_qAbgzc04iZWmVuoEBgn30v1HY1Wjadc,13543
+ pydantic_ai/models/function.py,sha256=iT4XT8VEaJbNwYJAWtrI_jbRAb2tZO6UL93ErR4RYhM,9629
+ pydantic_ai/models/gemini.py,sha256=3RTVQBAI1jWL3Xx_hi7qdy_6H-kTeuAOTPELnlVtPp4,27498
+ pydantic_ai/models/groq.py,sha256=kzQSFT-04WmQmdRaB6Wj0mxHeAXIgyrryZkptNiA4Ng,13211
+ pydantic_ai/models/mistral.py,sha256=qyYOLBpOdI5iPBmQxf5jp1d17sxqa1r8GJ7tb4yE45U,24549
+ pydantic_ai/models/ollama.py,sha256=aHI8pNw7fqOOgvlEWcTnTYTmhf0cGg41x-p5sUQr2_k,4200
+ pydantic_ai/models/openai.py,sha256=FzV6OCuK4Sr_J2GTuM-6Vu9NbDyZPxllwQPmssdOtbQ,13774
+ pydantic_ai/models/test.py,sha256=0m2Pdn0xJMjvAVekVIoADQL0aSkOnGZJct9k4WvImrQ,15880
+ pydantic_ai/models/vertexai.py,sha256=dHGrmLMgekWAEOZkLsO5rwDtQ6mjPixvn0umlvWAZok,9323
+ pydantic_ai_slim-0.0.19.dist-info/METADATA,sha256=lnGlda0-tCapsWI72DyzGV9Sppm5I7koWbb7-xEpWcU,2808
+ pydantic_ai_slim-0.0.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ pydantic_ai_slim-0.0.19.dist-info/RECORD,,
@@ -1,28 +0,0 @@
- pydantic_ai/__init__.py,sha256=FbYetEgT6OO25u2KF5ZnFxKpz5DtnSpfckRXP4mjl8E,489
- pydantic_ai/_griffe.py,sha256=Wqk3AuyeWuPwE5s1GbMeCsERelx1B4QcU9uYZSoko8s,3409
- pydantic_ai/_pydantic.py,sha256=qXi5IsyiYOHeg_-qozCdxkfeqw2z0gBTjqgywBCiJWo,8125
- pydantic_ai/_result.py,sha256=cUSugZQV0n5Z4fFHiMqua-2xs_0S6m-rr-yd6QS3nFE,10317
- pydantic_ai/_system_prompt.py,sha256=MZJWksIoS5GM3Au5lznlcQnC-h7eqwtE7oI5WFgRcOg,1090
- pydantic_ai/_utils.py,sha256=skWNgm89US_x1EpxdRy5wCkghBrm1XgxFCiEh6wAkAo,8753
- pydantic_ai/agent.py,sha256=8v7gyfMKB76k04SabQNV3QtUz80fSSL2BofULWwYO-o,52514
- pydantic_ai/exceptions.py,sha256=eGDKX6bGhgVxXBzu81Sk3iiAkXr0GUtgT7bD5Rxlqpg,2028
- pydantic_ai/format_as_xml.py,sha256=Gm65687GL8Z6A_lPiJWL1O_E3ovHEBn2O1DKhn1CDnA,4472
- pydantic_ai/messages.py,sha256=ImbWY8Ft3mxInUQ08EmIWywf4nJBvTiJhmsECRYDkSQ,8968
- pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- pydantic_ai/result.py,sha256=-dpaaD24E1Ns7fxz5Gn7SKou-A8Cag4LjEyCBJbrHzY,17597
- pydantic_ai/settings.py,sha256=oTk8ZfYuUsNxpJMWLvSrO1OH_0ur7VKgDNTMQG0tPyM,1974
- pydantic_ai/tools.py,sha256=G4lwAb7QIowtSHk7w5cH8WQFIFqwMPn0J6Nqhgz7ubA,11757
- pydantic_ai/usage.py,sha256=60d9f6M7YEYuKMbqDGDogX4KsA73fhDtWyDXYXoIPaI,4948
- pydantic_ai/models/__init__.py,sha256=XHt02IDQAircb-lEkIbIcuabSAIh5_UKnz2V1xN0Glw,10926
- pydantic_ai/models/anthropic.py,sha256=EUZgmvT0jhMDbooBp_jfW0z2cM5jTMuAhVws1XKgaNs,13451
- pydantic_ai/models/function.py,sha256=i7qkS_31aHrTbYVh6OzQ7Cwucz44F5PjT2EJK3GMphw,10573
- pydantic_ai/models/gemini.py,sha256=jHBVJFLgp7kPLXYy1zYTs_-ush9qS2fkmC28hK8vkJ0,28417
- pydantic_ai/models/groq.py,sha256=ZoPkuWJrf78JPnTRfZhi7v0ETgxJKNN5dH8BLWagGGk,15770
- pydantic_ai/models/mistral.py,sha256=xGVI6-b8-9vnFickPPI2cRaHEWLc0jKKUM_vMjipf-U,25894
- pydantic_ai/models/ollama.py,sha256=ELqxhcNcnvQBnadd3gukS01zprUp6v8N_h1P5K-uf6c,4188
- pydantic_ai/models/openai.py,sha256=qFFInL3NbgfGcsAWigxMP5mscp76hC-jJimHc9woU6Y,16518
- pydantic_ai/models/test.py,sha256=u2pdZd9OLXQ_jI6CaVt96udXuIcv0Hfnfqd3pFGmeJM,16514
- pydantic_ai/models/vertexai.py,sha256=gBlEGBIOoqGHYqu6d16VLRI0rWizx5I7P2s8IuGM1CQ,9318
- pydantic_ai_slim-0.0.17.dist-info/METADATA,sha256=hhVw5I9w5RQba3Dvsi3dKP9KUuFCfMuehHeGSQhhOmQ,2730
- pydantic_ai_slim-0.0.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- pydantic_ai_slim-0.0.17.dist-info/RECORD,,