pydantic-ai-slim 0.3.3__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of pydantic-ai-slim has been flagged as a potentially problematic release.
- pydantic_ai/_agent_graph.py +7 -1
- pydantic_ai/agent.py +1 -0
- pydantic_ai/messages.py +20 -11
- pydantic_ai/models/anthropic.py +7 -9
- pydantic_ai/models/instrumented.py +5 -1
- pydantic_ai/tools.py +3 -2
- {pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/METADATA +4 -4
- {pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/RECORD +11 -11
- {pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/_agent_graph.py
CHANGED
@@ -24,6 +24,7 @@ from .tools import RunContext, Tool, ToolDefinition, ToolsPrepareFunc
 
 if TYPE_CHECKING:
     from .mcp import MCPServer
+    from .models.instrumented import InstrumentationSettings
 
 __all__ = (
     'GraphAgentState',
@@ -112,6 +113,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
     default_retries: int
 
     tracer: Tracer
+    instrumentation_settings: InstrumentationSettings | None = None
 
     prepare_tools: ToolsPrepareFunc[DepsT] | None = None
 
@@ -712,6 +714,10 @@ async def process_function_tools( # noqa C901
 
     user_parts: list[_messages.UserPromptPart] = []
 
+    include_content = (
+        ctx.deps.instrumentation_settings is not None and ctx.deps.instrumentation_settings.include_content
+    )
+
     # Run all tool tasks in parallel
     results_by_index: dict[int, _messages.ModelRequestPart] = {}
     with ctx.deps.tracer.start_as_current_span(
@@ -722,7 +728,7 @@
         },
     ):
         tasks = [
-            asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer), name=call.tool_name)
+            asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer, include_content), name=call.tool_name)
             for tool, call in calls_to_run
         ]
 
pydantic_ai/agent.py
CHANGED
@@ -719,6 +719,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             tracer=tracer,
             prepare_tools=self._prepare_tools,
             get_instructions=get_instructions,
+            instrumentation_settings=instrumentation_settings,
         )
         start_node = _agent_graph.UserPromptNode[AgentDepsT](
             user_prompt=user_prompt,
pydantic_ai/messages.py
CHANGED
@@ -76,8 +76,11 @@ class SystemPromptPart:
     part_kind: Literal['system-prompt'] = 'system-prompt'
     """Part type identifier, this is available on all parts as a discriminator."""
 
-    def otel_event(self) -> Event:
-        return Event('gen_ai.system.message', body={'content': self.content, 'role': 'system'})
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
+        return Event(
+            'gen_ai.system.message',
+            body={'role': 'system', **({'content': self.content} if settings.include_content else {})},
+        )
 
     __repr__ = _utils.dataclasses_no_defaults_repr
 
@@ -362,12 +365,12 @@ class UserPromptPart:
             content = []
             for part in self.content:
                 if isinstance(part, str):
-                    content.append(part)
+                    content.append(part if settings.include_content else {'kind': 'text'})
                 elif isinstance(part, (ImageUrl, AudioUrl, DocumentUrl, VideoUrl)):
-                    content.append({'kind': part.kind, 'url': part.url})
+                    content.append({'kind': part.kind, **({'url': part.url} if settings.include_content else {})})
                 elif isinstance(part, BinaryContent):
                     converted_part = {'kind': part.kind, 'media_type': part.media_type}
-                    if settings.include_binary_content:
+                    if settings.include_content and settings.include_binary_content:
                         converted_part['binary_content'] = base64.b64encode(part.data).decode()
                     content.append(converted_part)
                 else:
@@ -414,10 +417,15 @@ class ToolReturnPart:
         else:
             return {'return_value': tool_return_ta.dump_python(self.content, mode='json')}
 
-    def otel_event(self) -> Event:
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
         return Event(
             'gen_ai.tool.message',
-            body={'content': self.content, 'role': 'tool', 'id': self.tool_call_id, 'name': self.tool_name},
+            body={
+                **({'content': self.content} if settings.include_content else {}),
+                'role': 'tool',
+                'id': self.tool_call_id,
+                'name': self.tool_name,
+            },
         )
 
     __repr__ = _utils.dataclasses_no_defaults_repr
 
@@ -473,14 +481,14 @@ class RetryPromptPart:
             description = f'{len(self.content)} validation errors: {json_errors.decode()}'
         return f'{description}\n\nFix the errors and try again.'
 
-    def otel_event(self) -> Event:
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
         if self.tool_name is None:
             return Event('gen_ai.user.message', body={'content': self.model_response(), 'role': 'user'})
         else:
             return Event(
                 'gen_ai.tool.message',
                 body={
-                    'content': self.model_response(),
+                    **({'content': self.model_response()} if settings.include_content else {}),
                     'role': 'tool',
                     'id': self.tool_call_id,
                     'name': self.tool_name,
@@ -657,7 +665,7 @@ class ModelResponse:
     vendor_id: str | None = None
     """Vendor ID as specified by the model provider. This can be used to track the specific request to the model."""
 
-    def otel_events(self) -> list[Event]:
+    def otel_events(self, settings: InstrumentationSettings) -> list[Event]:
         """Return OpenTelemetry events for the response."""
         result: list[Event] = []
 
@@ -683,7 +691,8 @@
             elif isinstance(part, TextPart):
                 if body.get('content'):
                     body = new_event_body()
-                body['content'] = part.content
+                if settings.include_content:
+                    body['content'] = part.content
 
         return result
 
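The `settings` parameter added above lets each message part decide whether its content is copied into the OpenTelemetry event body. A minimal sketch of the resulting behaviour, using an illustrative prompt string that is not taken from the diff:

    from pydantic_ai.messages import SystemPromptPart
    from pydantic_ai.models.instrumented import InstrumentationSettings

    part = SystemPromptPart(content='You are a concise assistant.')

    # Default behaviour: the prompt text is attached to the gen_ai.system.message event.
    event = part.otel_event(InstrumentationSettings(include_content=True))
    print(event.body)  # {'role': 'system', 'content': 'You are a concise assistant.'}

    # With include_content=False only the role survives; the prompt text is redacted.
    redacted = part.otel_event(InstrumentationSettings(include_content=False))
    print(redacted.body)  # {'role': 'system'}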
pydantic_ai/models/anthropic.py
CHANGED
@@ -342,15 +342,13 @@ class AnthropicModel(Model):
                        if response_part.content:  # Only add non-empty text
                            assistant_content_params.append(BetaTextBlockParam(text=response_part.content, type='text'))
                    elif isinstance(response_part, ThinkingPart):
-                        # NOTE: We …
-                        # …
-                        # …
-                        # …
-                        # …
-                        # …
-                        # …
-                        # )
-                        pass
+                        # NOTE: We only send thinking part back for Anthropic, otherwise they raise an error.
+                        if response_part.signature is not None:  # pragma: no branch
+                            assistant_content_params.append(
+                                BetaThinkingBlockParam(
+                                    thinking=response_part.content, signature=response_part.signature, type='thinking'
+                                )
+                            )
                    else:
                        tool_use_block_param = BetaToolUseBlockParam(
                            id=_guard_tool_call_id(t=response_part),
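The rewritten branch sends signed thinking blocks back to Anthropic on follow-up requests instead of dropping them. A rough sketch of how extended thinking might be enabled so that such blocks appear in the message history at all; the model name and token budget are illustrative, and `AnthropicModelSettings.anthropic_thinking` is assumed to be available in the installed version:

    from pydantic_ai import Agent
    from pydantic_ai.models.anthropic import AnthropicModelSettings

    # Enable Anthropic extended thinking; responses then contain ThinkingPart items
    # carrying a signature, which the mapping code above now echoes back as
    # BetaThinkingBlockParam blocks on the next request.
    agent = Agent(
        'anthropic:claude-3-7-sonnet-latest',
        model_settings=AnthropicModelSettings(
            anthropic_thinking={'type': 'enabled', 'budget_tokens': 1024},
        ),
    )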
pydantic_ai/models/instrumented.py
CHANGED
@@ -92,6 +92,7 @@ class InstrumentationSettings:
        meter_provider: MeterProvider | None = None,
        event_logger_provider: EventLoggerProvider | None = None,
        include_binary_content: bool = True,
+        include_content: bool = True,
    ):
        """Create instrumentation options.
 
@@ -109,6 +110,8 @@ class InstrumentationSettings:
                Calling `logfire.configure()` sets the global event logger provider, so most users don't need this.
                This is only used if `event_mode='logs'`.
            include_binary_content: Whether to include binary content in the instrumentation events.
+            include_content: Whether to include prompts, completions, and tool call arguments and responses
+                in the instrumentation events.
        """
        from pydantic_ai import __version__
 
@@ -121,6 +124,7 @@ class InstrumentationSettings:
        self.event_logger = event_logger_provider.get_event_logger(scope_name, __version__)
        self.event_mode = event_mode
        self.include_binary_content = include_binary_content
+        self.include_content = include_content
 
        # As specified in the OpenTelemetry GenAI metrics spec:
        # https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-metrics/#metric-gen_aiclienttokenusage
@@ -161,7 +165,7 @@ class InstrumentationSettings:
                    if hasattr(part, 'otel_event'):
                        message_events.append(part.otel_event(self))
                elif isinstance(message, ModelResponse):  # pragma: no branch
-                    message_events = message.otel_events()
+                    message_events = message.otel_events(self)
                for event in message_events:
                    event.attributes = {
                        'gen_ai.message.index': message_index,
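Taken together with the changes above, `include_content` is a single switch for redacting prompts, completions, and tool-call payloads from telemetry while keeping the span and event structure intact. A hedged configuration sketch; the model name is illustrative, and the `instrument=` parameter plus `Agent.instrument_all()` come from the agent API rather than from this diff:

    from pydantic_ai import Agent
    from pydantic_ai.models.instrumented import InstrumentationSettings

    # Emit spans, metrics and gen_ai.* events as before, but leave message content,
    # completions and tool arguments/results out of the exported telemetry.
    settings = InstrumentationSettings(include_content=False)

    # Per-agent opt-in:
    agent = Agent('openai:gpt-4o', instrument=settings)

    # Or apply to every agent in the process:
    Agent.instrument_all(settings)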
pydantic_ai/tools.py
CHANGED
@@ -327,6 +327,7 @@ class Tool(Generic[AgentDepsT]):
        message: _messages.ToolCallPart,
        run_context: RunContext[AgentDepsT],
        tracer: Tracer,
+        include_content: bool = False,
    ) -> _messages.ToolReturnPart | _messages.RetryPromptPart:
        """Run the tool function asynchronously.
 
@@ -338,14 +339,14 @@ class Tool(Generic[AgentDepsT]):
            'gen_ai.tool.name': self.name,
            # NOTE: this means `gen_ai.tool.call.id` will be included even if it was generated by pydantic-ai
            'gen_ai.tool.call.id': message.tool_call_id,
-            'tool_arguments': message.args_as_json_str(),
+            **({'tool_arguments': message.args_as_json_str()} if include_content else {}),
            'logfire.msg': f'running tool: {self.name}',
            # add the JSON schema so these attributes are formatted nicely in Logfire
            'logfire.json_schema': json.dumps(
                {
                    'type': 'object',
                    'properties': {
-                        'tool_arguments': {'type': 'object'},
+                        **({'tool_arguments': {'type': 'object'}} if include_content else {}),
                        'gen_ai.tool.name': {},
                        'gen_ai.tool.call.id': {},
                    },
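On the tool-execution side, the `running tool` span keeps `gen_ai.tool.name` and `gen_ai.tool.call.id` but no longer records the serialized `tool_arguments` attribute when content is excluded. A small runnable sketch; the `'test'` model (the built-in TestModel, which calls registered tools with generated arguments) and the prompt are assumptions made so the example needs no provider credentials:

    from pydantic_ai import Agent
    from pydantic_ai.models.instrumented import InstrumentationSettings

    agent = Agent('test', instrument=InstrumentationSettings(include_content=False))

    @agent.tool_plain
    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    # The resulting 'running tool: add' span carries the tool name and call id,
    # but no 'tool_arguments' attribute because include_content is False.
    result = agent.run_sync('add two numbers')
    print(result.output)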
{pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.3
+Version: 0.3.4
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.3
+Requires-Dist: pydantic-graph==0.3.4
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.3; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.4; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.3; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.4; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 pydantic_ai/__init__.py,sha256=Ns04g4Efqkzwccs8w2nGphfWbptMlIJYG8vIJbGGyG0,1262
 pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
 pydantic_ai/_a2a.py,sha256=8nNtx6GENDt2Ej3f1ui9L-FuNQBYVELpJFfwz-y7fUw,7234
-pydantic_ai/_agent_graph.py,sha256=…
+pydantic_ai/_agent_graph.py,sha256=pAP2bbO1TenFZO3alaW-5QTMk2uN7lWfFfgWe7zAAAU,40571
 pydantic_ai/_cli.py,sha256=R-sE-9gYqPxV5-5utso4g-bzAKMiTCdo33XOVqE0ZEg,13206
 pydantic_ai/_function_schema.py,sha256=TP9Y1wlN7tRHTDhkJ1IopJHPWpSbxJCxS3zxCb0dpK4,10806
 pydantic_ai/_griffe.py,sha256=Sf_DisE9k2TA0VFeVIK2nf1oOct5MygW86PBCACJkFA,5244
@@ -12,18 +12,18 @@ pydantic_ai/_run_context.py,sha256=zNkSyiQSH-YweO39ii3iB2taouUOodo3sTjz2Lrj4Pc,1…
 pydantic_ai/_system_prompt.py,sha256=lUSq-gDZjlYTGtd6BUm54yEvTIvgdwBmJ8mLsNZZtYU,1142
 pydantic_ai/_thinking_part.py,sha256=mzx2RZSfiQxAKpljEflrcXRXmFKxtp6bKVyorY3UYZk,1554
 pydantic_ai/_utils.py,sha256=JKjM3YIgn2CslJcQKxDcq5nWV57TUTS23x8t3u6v_wA,15025
-pydantic_ai/agent.py,sha256=…
+pydantic_ai/agent.py,sha256=Xy92Pi4R0CKrEkPtAXbUBWQi_d4Un1mwXafPBfjjTy8,95584
 pydantic_ai/direct.py,sha256=tXRcQ3fMkykaawO51VxnSwQnqcEmu1LhCy7U9gOyM-g,7768
 pydantic_ai/exceptions.py,sha256=IdFw594Ou7Vn4YFa7xdZ040_j_6nmyA3MPANbC7sys4,3175
 pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
 pydantic_ai/format_prompt.py,sha256=qdKep95Sjlr7u1-qag4JwPbjoURbG0GbeU_l5ODTNw4,4466
 pydantic_ai/mcp.py,sha256=MSLNFiC38A1e5W7K8gQdnh0_DNLqD_AdzW064U83FJ8,21836
-pydantic_ai/messages.py,sha256=…
+pydantic_ai/messages.py,sha256=AkfJBDaldwpx05AkBvOI-jqTSmx1tn9yt9CUr07VWW0,36742
 pydantic_ai/output.py,sha256=L05UoIwLbQ2LWvk0TuQrAdbbNdWSP6xZt10l61ORVfI,9147
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=jCnM_KDX9383OoPy4L8YtPmhmlZkJkCufbq2CGxbgK4,25437
 pydantic_ai/settings.py,sha256=yuUZ7-GkdPB-Gbx71kSdh8dSr6gwM9gEwk84qNxPO_I,3552
-pydantic_ai/tools.py,sha256=…
+pydantic_ai/tools.py,sha256=FFmNShGNh7tpOy9PBd7VDwParxKhbNsNZ-iYw0eHVzY,17117
 pydantic_ai/usage.py,sha256=35YPmItlzfNOwP35Rhh0qBUOlg5On5rUE7xqHQWrpaU,5596
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/common_tools/duckduckgo.py,sha256=Ty9tu1rCwMfGKgz1JAaC2q_4esmL6QvpkHQUN8F0Ecc,2152
@@ -31,7 +31,7 @@ pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQ…
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/langchain.py,sha256=TI8B6eBjEGKFfvwyLgC_-0eeba4hDJq7wLZ0OZhbiWw,1967
 pydantic_ai/models/__init__.py,sha256=baO_rIg9sr_AQVj2Qbz7fgdA-R4eDXriwtkV1u-mS58,29012
-pydantic_ai/models/anthropic.py,sha256=…
+pydantic_ai/models/anthropic.py,sha256=YdN4TXPsVoPB8lILz2k9we-7K8thfPo7ikrta-sDd_Y,23676
 pydantic_ai/models/bedrock.py,sha256=67qf_mFnx0kfmKoI96zLOAUn3P47PxPqMrQsaYUrJJ0,29120
 pydantic_ai/models/cohere.py,sha256=UU04-_O-KLgC4DUpM-g4FBPoTOatbmVJJ7mkZNBGsbQ,12626
 pydantic_ai/models/fallback.py,sha256=idOYGMo3CZzpCBT8DDiuPAAgnV2jzluDUq3ESb3KteM,4981
@@ -39,7 +39,7 @@ pydantic_ai/models/function.py,sha256=nfCjRmbcF7sdK_nsak1fvzz9Xkptx5WhsxvWdB02ze…
 pydantic_ai/models/gemini.py,sha256=r_DWEmaMvP55duanTLI2SvKC3hAqnDAm2W0-UwLYShI,38485
 pydantic_ai/models/google.py,sha256=fbMG5VEjxHIeoP9JXBgymQtfB8gVnzDU2TTey5FMbbA,23396
 pydantic_ai/models/groq.py,sha256=lojKRdvg0p-EtZ20Z2CS4I0goq4CoGkLj3LuYHA6o-I,18497
-pydantic_ai/models/instrumented.py,sha256=…
+pydantic_ai/models/instrumented.py,sha256=olTa7Fl2BwHLvTLT6sSrS2HOS7UyWg182Xujx8hutBw,15947
 pydantic_ai/models/mcp_sampling.py,sha256=dWPJjez_aghF_JVGGCd4nZjlnmxYGLFtUS0vlzfRFyk,3398
 pydantic_ai/models/mistral.py,sha256=Ev2zK2UWh4NrRpc2LQE0LNZFBSznNvZ1mkh4_4VcRMg,30514
 pydantic_ai/models/openai.py,sha256=NUE2WroS0BSxLWlfi-RFGTp0k0iv5zb2LWXQ7-Uz9Hw,53016
@@ -74,8 +74,8 @@ pydantic_ai/providers/mistral.py,sha256=EIUSENjFuGzBhvbdrarUTM4VPkesIMnZrzfnEKHO…
 pydantic_ai/providers/openai.py,sha256=7iGij0EaFylab7dTZAZDgXr78tr-HsZrn9EI9AkWBNQ,3091
 pydantic_ai/providers/openrouter.py,sha256=NXjNdnlXIBrBMMqbzcWQnowXOuZh4NHikXenBn5h3mc,4061
 pydantic_ai/providers/together.py,sha256=zFVSMSm5jXbpkNouvBOTjWrPmlPpCp6sQS5LMSyVjrQ,3482
-pydantic_ai_slim-0.3.3.dist-info/METADATA,sha256=…
-pydantic_ai_slim-0.3.3.dist-info/WHEEL,sha256=…
-pydantic_ai_slim-0.3.3.dist-info/entry_points.txt,sha256=…
-pydantic_ai_slim-0.3.3.dist-info/licenses/LICENSE,sha256=…
-pydantic_ai_slim-0.3.3.dist-info/RECORD,,
+pydantic_ai_slim-0.3.4.dist-info/METADATA,sha256=0A68abJj0VyoP1r1LiU35lWhK0WbZfcu6NX5_pe9LBs,3846
+pydantic_ai_slim-0.3.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.3.4.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-0.3.4.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-0.3.4.dist-info/RECORD,,
{pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/WHEEL
File without changes
{pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/entry_points.txt
File without changes
{pydantic_ai_slim-0.3.3.dist-info → pydantic_ai_slim-0.3.4.dist-info}/licenses/LICENSE
File without changes