pydantic-ai-slim 0.2.19__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim has been flagged as potentially problematic; review the release details in the originating registry advisory before upgrading.

@@ -10,6 +10,7 @@ from typing import Any, Literal, Union, cast, overload
10
10
 
11
11
  from typing_extensions import assert_never
12
12
 
13
+ from pydantic_ai._thinking_part import split_content_into_text_and_thinking
13
14
  from pydantic_ai.profiles.openai import OpenAIModelProfile
14
15
  from pydantic_ai.providers import Provider, infer_provider
15
16
 
@@ -28,6 +29,7 @@ from ..messages import (
28
29
  RetryPromptPart,
29
30
  SystemPromptPart,
30
31
  TextPart,
32
+ ThinkingPart,
31
33
  ToolCallPart,
32
34
  ToolReturnPart,
33
35
  UserPromptPart,
@@ -137,6 +139,9 @@ class OpenAIResponsesModelSettings(OpenAIModelSettings, total=False):
137
139
  """
138
140
 
139
141
  openai_reasoning_generate_summary: Literal['detailed', 'concise']
142
+ """Deprecated alias for `openai_reasoning_summary`."""
143
+
144
+ openai_reasoning_summary: Literal['detailed', 'concise']
140
145
  """A summary of the reasoning performed by the model.
141
146
 
142
147
  This can be useful for debugging and understanding the model's reasoning process.
@@ -325,6 +330,10 @@ class OpenAIModel(Model):
325
330
  timestamp = number_to_datetime(response.created)
326
331
  choice = response.choices[0]
327
332
  items: list[ModelResponsePart] = []
333
+ # The `reasoning_content` is only present in DeepSeek models.
334
+ if reasoning_content := getattr(choice.message, 'reasoning_content', None):
335
+ items.append(ThinkingPart(content=reasoning_content))
336
+
328
337
  vendor_details: dict[str, Any] | None = None
329
338
 
330
339
  # Add logprobs to vendor_details if available
@@ -345,7 +354,7 @@ class OpenAIModel(Model):
345
354
  }
346
355
 
347
356
  if choice.message.content is not None:
348
- items.append(TextPart(choice.message.content))
357
+ items.extend(split_content_into_text_and_thinking(choice.message.content))
349
358
  if choice.message.tool_calls is not None:
350
359
  for c in choice.message.tool_calls:
351
360
  part = ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id)
@@ -394,6 +403,11 @@ class OpenAIModel(Model):
394
403
  for item in message.parts:
395
404
  if isinstance(item, TextPart):
396
405
  texts.append(item.content)
406
+ elif isinstance(item, ThinkingPart):
407
+ # NOTE: We don't send ThinkingPart to the providers yet. If you are unsatisfied with this,
408
+ # please open an issue. The below code is the code to send thinking to the provider.
409
+ # texts.append(f'<think>\n{item.content}\n</think>')
410
+ pass
397
411
  elif isinstance(item, ToolCallPart):
398
412
  tool_calls.append(self._map_tool_call(item))
399
413
  else:
@@ -611,7 +625,12 @@ class OpenAIResponsesModel(Model):
611
625
  items: list[ModelResponsePart] = []
612
626
  items.append(TextPart(response.output_text))
613
627
  for item in response.output:
614
- if item.type == 'function_call':
628
+ if item.type == 'reasoning':
629
+ for summary in item.summary:
630
+ # NOTE: We use the same id for all summaries because we can merge them on the round trip.
631
+ # The providers don't force the signature to be unique.
632
+ items.append(ThinkingPart(content=summary.text, id=item.id))
633
+ elif item.type == 'function_call':
615
634
  items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
616
635
  return ModelResponse(
617
636
  items,
@@ -710,11 +729,22 @@ class OpenAIResponsesModel(Model):
710
729
 
711
730
  def _get_reasoning(self, model_settings: OpenAIResponsesModelSettings) -> Reasoning | NotGiven:
712
731
  reasoning_effort = model_settings.get('openai_reasoning_effort', None)
732
+ reasoning_summary = model_settings.get('openai_reasoning_summary', None)
713
733
  reasoning_generate_summary = model_settings.get('openai_reasoning_generate_summary', None)
714
734
 
715
- if reasoning_effort is None and reasoning_generate_summary is None:
735
+ if reasoning_summary and reasoning_generate_summary: # pragma: no cover
736
+ raise ValueError('`openai_reasoning_summary` and `openai_reasoning_generate_summary` cannot both be set.')
737
+
738
+ if reasoning_generate_summary is not None: # pragma: no cover
739
+ warnings.warn(
740
+ '`openai_reasoning_generate_summary` is deprecated, use `openai_reasoning_summary` instead',
741
+ DeprecationWarning,
742
+ )
743
+ reasoning_summary = reasoning_generate_summary
744
+
745
+ if reasoning_effort is None and reasoning_summary is None:
716
746
  return NOT_GIVEN
717
- return Reasoning(effort=reasoning_effort, generate_summary=reasoning_generate_summary)
747
+ return Reasoning(effort=reasoning_effort, summary=reasoning_summary)
718
748
 
719
749
  def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.FunctionToolParam]:
720
750
  tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
@@ -770,11 +800,30 @@ class OpenAIResponsesModel(Model):
770
800
  else:
771
801
  assert_never(part)
772
802
  elif isinstance(message, ModelResponse):
803
+ # last_thinking_part_idx: int | None = None
773
804
  for item in message.parts:
774
805
  if isinstance(item, TextPart):
775
806
  openai_messages.append(responses.EasyInputMessageParam(role='assistant', content=item.content))
776
807
  elif isinstance(item, ToolCallPart):
777
808
  openai_messages.append(self._map_tool_call(item))
809
+ elif isinstance(item, ThinkingPart):
810
+ # NOTE: We don't send ThinkingPart to the providers yet. If you are unsatisfied with this,
811
+ # please open an issue. The below code is the code to send thinking to the provider.
812
+ # if last_thinking_part_idx is not None:
813
+ # reasoning_item = cast(responses.ResponseReasoningItemParam, openai_messages[last_thinking_part_idx]) # fmt: skip
814
+ # if item.id == reasoning_item['id']:
815
+ # assert isinstance(reasoning_item['summary'], list)
816
+ # reasoning_item['summary'].append(Summary(text=item.content, type='summary_text'))
817
+ # continue
818
+ # last_thinking_part_idx = len(openai_messages)
819
+ # openai_messages.append(
820
+ # responses.ResponseReasoningItemParam(
821
+ # id=item.id or generate_tool_call_id(),
822
+ # summary=[Summary(text=item.content, type='summary_text')],
823
+ # type='reasoning',
824
+ # )
825
+ # )
826
+ pass
778
827
  else:
779
828
  assert_never(item)
780
829
  else:
@@ -948,13 +997,43 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
948
997
  vendor_part_id=chunk.item.id,
949
998
  tool_name=chunk.item.name,
950
999
  args=chunk.item.arguments,
951
- tool_call_id=chunk.item.id,
1000
+ tool_call_id=chunk.item.call_id,
1001
+ )
1002
+ elif isinstance(chunk.item, responses.ResponseReasoningItem):
1003
+ content = chunk.item.summary[0].text if chunk.item.summary else ''
1004
+ yield self._parts_manager.handle_thinking_delta(
1005
+ vendor_part_id=chunk.item.id,
1006
+ content=content,
1007
+ signature=chunk.item.id,
1008
+ )
1009
+ elif isinstance(chunk.item, responses.ResponseOutputMessage):
1010
+ pass
1011
+ else:
1012
+ warnings.warn( # pragma: no cover
1013
+ f'Handling of this item type is not yet implemented. Please report on our GitHub: {chunk}',
1014
+ UserWarning,
952
1015
  )
953
1016
 
954
1017
  elif isinstance(chunk, responses.ResponseOutputItemDoneEvent):
955
1018
  # NOTE: We only need this if the tool call deltas don't include the final info.
956
1019
  pass
957
1020
 
1021
+ elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
1022
+ pass # there's nothing we need to do here
1023
+
1024
+ elif isinstance(chunk, responses.ResponseReasoningSummaryPartDoneEvent):
1025
+ pass # there's nothing we need to do here
1026
+
1027
+ elif isinstance(chunk, responses.ResponseReasoningSummaryTextDoneEvent):
1028
+ pass # there's nothing we need to do here
1029
+
1030
+ elif isinstance(chunk, responses.ResponseReasoningSummaryTextDeltaEvent):
1031
+ yield self._parts_manager.handle_thinking_delta(
1032
+ vendor_part_id=chunk.item_id,
1033
+ content=chunk.delta,
1034
+ signature=chunk.item_id,
1035
+ )
1036
+
958
1037
  elif isinstance(chunk, responses.ResponseTextDeltaEvent):
959
1038
  yield self._parts_manager.handle_text_delta(vendor_part_id=chunk.content_index, content=chunk.delta)
960
1039
 
@@ -9,6 +9,7 @@ from datetime import date, datetime, timedelta
9
9
  from typing import Any, Literal
10
10
 
11
11
  import pydantic_core
12
+ from typing_extensions import assert_never
12
13
 
13
14
  from .. import _utils
14
15
  from ..messages import (
@@ -19,17 +20,14 @@ from ..messages import (
19
20
  ModelResponseStreamEvent,
20
21
  RetryPromptPart,
21
22
  TextPart,
23
+ ThinkingPart,
22
24
  ToolCallPart,
23
25
  ToolReturnPart,
24
26
  )
25
27
  from ..settings import ModelSettings
26
28
  from ..tools import ToolDefinition
27
29
  from ..usage import Usage
28
- from . import (
29
- Model,
30
- ModelRequestParameters,
31
- StreamedResponse,
32
- )
30
+ from . import Model, ModelRequestParameters, StreamedResponse
33
31
  from .function import _estimate_string_tokens, _estimate_usage # pyright: ignore[reportPrivateUsage]
34
32
 
35
33
 
@@ -254,10 +252,15 @@ class TestStreamedResponse(StreamedResponse):
254
252
  for word in words:
255
253
  self._usage += _get_string_usage(word)
256
254
  yield self._parts_manager.handle_text_delta(vendor_part_id=i, content=word)
257
- else:
255
+ elif isinstance(part, ToolCallPart):
258
256
  yield self._parts_manager.handle_tool_call_part(
259
257
  vendor_part_id=i, tool_name=part.tool_name, args=part.args, tool_call_id=part.tool_call_id
260
258
  )
259
+ elif isinstance(part, ThinkingPart): # pragma: no cover
260
+ # NOTE: There's no way to reach this part of the code, since we don't generate ThinkingPart on TestModel.
261
+ assert False, "This should be unreachable — we don't generate ThinkingPart on TestModel."
262
+ else:
263
+ assert_never(part)
261
264
 
262
265
  @property
263
266
  def model_name(self) -> str:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 0.2.19
3
+ Version: 0.3.0
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
5
  Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
6
6
  License-Expression: MIT
@@ -30,15 +30,15 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
30
30
  Requires-Dist: griffe>=1.3.2
31
31
  Requires-Dist: httpx>=0.27
32
32
  Requires-Dist: opentelemetry-api>=1.28.0
33
- Requires-Dist: pydantic-graph==0.2.19
33
+ Requires-Dist: pydantic-graph==0.3.0
34
34
  Requires-Dist: pydantic>=2.10
35
35
  Requires-Dist: typing-inspection>=0.4.0
36
36
  Provides-Extra: a2a
37
- Requires-Dist: fasta2a==0.2.19; extra == 'a2a'
37
+ Requires-Dist: fasta2a==0.3.0; extra == 'a2a'
38
38
  Provides-Extra: anthropic
39
39
  Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
40
40
  Provides-Extra: bedrock
41
- Requires-Dist: boto3>=1.35.74; extra == 'bedrock'
41
+ Requires-Dist: boto3>=1.37.24; extra == 'bedrock'
42
42
  Provides-Extra: cli
43
43
  Requires-Dist: argcomplete>=3.5.0; extra == 'cli'
44
44
  Requires-Dist: prompt-toolkit>=3; extra == 'cli'
@@ -48,11 +48,11 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
48
48
  Provides-Extra: duckduckgo
49
49
  Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
50
50
  Provides-Extra: evals
51
- Requires-Dist: pydantic-evals==0.2.19; extra == 'evals'
51
+ Requires-Dist: pydantic-evals==0.3.0; extra == 'evals'
52
52
  Provides-Extra: google
53
53
  Requires-Dist: google-genai>=1.15.0; extra == 'google'
54
54
  Provides-Extra: groq
55
- Requires-Dist: groq>=0.15.0; extra == 'groq'
55
+ Requires-Dist: groq>=0.19.0; extra == 'groq'
56
56
  Provides-Extra: logfire
57
57
  Requires-Dist: logfire>=3.11.0; extra == 'logfire'
58
58
  Provides-Extra: mcp
@@ -60,7 +60,7 @@ Requires-Dist: mcp>=1.9.4; (python_version >= '3.10') and extra == 'mcp'
60
60
  Provides-Extra: mistral
61
61
  Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
62
62
  Provides-Extra: openai
63
- Requires-Dist: openai>=1.75.0; extra == 'openai'
63
+ Requires-Dist: openai>=1.76.0; extra == 'openai'
64
64
  Provides-Extra: tavily
65
65
  Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
66
66
  Provides-Extra: vertexai
@@ -1,21 +1,22 @@
1
1
  pydantic_ai/__init__.py,sha256=5flxyMQJVrHRMQ3MYaZf1el2ctNs0JmPClKbw2Q-Lsk,1160
2
2
  pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
3
3
  pydantic_ai/_a2a.py,sha256=8nNtx6GENDt2Ej3f1ui9L-FuNQBYVELpJFfwz-y7fUw,7234
4
- pydantic_ai/_agent_graph.py,sha256=0CCS-y_ESkW5SoBxD4sYlUBeCQwmoaQJUkliGJ-tpYs,37790
4
+ pydantic_ai/_agent_graph.py,sha256=8Y1xEFwNCp0hVsRst_2I4WQymeTFRJ4Ec5-lURMd5HQ,39671
5
5
  pydantic_ai/_cli.py,sha256=kc9UxGjYsKK0IR4No-V5BGiAtq2fY6eZZ9rBkAdHWOM,12948
6
- pydantic_ai/_function_schema.py,sha256=l1Nzp2o8ixp8W9NDkQucx-JFsN0iW4aeEVTBGEPOM4E,10608
6
+ pydantic_ai/_function_schema.py,sha256=VXHGnudrpyW40UJqCopgSUB_IuSip5pEEBSLGhVEuFI,10846
7
7
  pydantic_ai/_griffe.py,sha256=Sf_DisE9k2TA0VFeVIK2nf1oOct5MygW86PBCACJkFA,5244
8
8
  pydantic_ai/_output.py,sha256=HYhcaqcisU16PT_EFdl2VuV5MI-nRFbUPzijd_rTTgM,16787
9
- pydantic_ai/_parts_manager.py,sha256=c0Gj29FH8K20AmxIr7MY8_SQVdb7SRIRcJYTQVmVYgc,12204
9
+ pydantic_ai/_parts_manager.py,sha256=Lioi8b7Nfyax09yQu8jTkMzxd26dYDrdAqhYvjRSKqQ,16182
10
10
  pydantic_ai/_system_prompt.py,sha256=W5wYN6rH5JCshl1xI2s0ygevBCutCraqyG6t75yZubk,1117
11
+ pydantic_ai/_thinking_part.py,sha256=mzx2RZSfiQxAKpljEflrcXRXmFKxtp6bKVyorY3UYZk,1554
11
12
  pydantic_ai/_utils.py,sha256=qi2NjYpIVOgCHDMPgyV8oUL42Fv2_rLyj8KdOUO5fQU,11319
12
- pydantic_ai/agent.py,sha256=vS7Tbiu_UUjKAvGXcBArSS4sU21WI8sUBY1t-m1Uy6Q,94412
13
+ pydantic_ai/agent.py,sha256=oNW5ffihOF1Kn13N3GZ9wudyooGxN0O3r1wGJAwYUMY,94448
13
14
  pydantic_ai/direct.py,sha256=tXRcQ3fMkykaawO51VxnSwQnqcEmu1LhCy7U9gOyM-g,7768
14
15
  pydantic_ai/exceptions.py,sha256=IdFw594Ou7Vn4YFa7xdZ040_j_6nmyA3MPANbC7sys4,3175
15
16
  pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
16
17
  pydantic_ai/format_prompt.py,sha256=qdKep95Sjlr7u1-qag4JwPbjoURbG0GbeU_l5ODTNw4,4466
17
- pydantic_ai/mcp.py,sha256=q8CDo5BKQvWmfYb_J7vbnyZptCn6xksE2fETCW8dL8g,18230
18
- pydantic_ai/messages.py,sha256=UraSZP4yTSQa0UnFfBrnD5m80GXNY1hf6YE72F5wz4A,33350
18
+ pydantic_ai/mcp.py,sha256=OkbwSBODgeC4BX2QIvTmECZJbeSYtjZ15ZPnEyf95UI,20157
19
+ pydantic_ai/messages.py,sha256=Z8cNpaEcMgdJpyE9ydBLBDJV0A-Hf-GllLAWeUKY4_0,36124
19
20
  pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
20
21
  pydantic_ai/result.py,sha256=YlcR0QAQIejz3fbZ50zYfHKIZco0dwmnZTxytV-n3oM,24609
21
22
  pydantic_ai/settings.py,sha256=eRJs2fI2yaIrhtYRlWqKlC9KnFaJHvslgSll8NQ20jc,3533
@@ -27,18 +28,18 @@ pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQ
27
28
  pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
28
29
  pydantic_ai/ext/langchain.py,sha256=TI8B6eBjEGKFfvwyLgC_-0eeba4hDJq7wLZ0OZhbiWw,1967
29
30
  pydantic_ai/models/__init__.py,sha256=LhBw4yxIEMByJPthAiWtQwGgNlj3cQkOaX6wtzeMFjA,27947
30
- pydantic_ai/models/anthropic.py,sha256=E7vVhCkw474aKWTzn5UZOlwEV8xgjeFrjJo9HbcCltc,21402
31
- pydantic_ai/models/bedrock.py,sha256=OVuPe_gQ1KHzvN9k5w0IZjdY3uGbQ1LqDAeaC9ffBlE,27944
32
- pydantic_ai/models/cohere.py,sha256=bQLTQguqVXkzkPgWmMncrxApO9CQ7YtgrmFwa057g7g,12116
31
+ pydantic_ai/models/anthropic.py,sha256=s7yvNObBfS-gcXLT0vU8UXjLHITsbr5kkXgP1SYkPms,23832
32
+ pydantic_ai/models/bedrock.py,sha256=67qf_mFnx0kfmKoI96zLOAUn3P47PxPqMrQsaYUrJJ0,29120
33
+ pydantic_ai/models/cohere.py,sha256=UU04-_O-KLgC4DUpM-g4FBPoTOatbmVJJ7mkZNBGsbQ,12626
33
34
  pydantic_ai/models/fallback.py,sha256=idOYGMo3CZzpCBT8DDiuPAAgnV2jzluDUq3ESb3KteM,4981
34
- pydantic_ai/models/function.py,sha256=rnihsyakyieCGbEyxfqzvoBHnR_3LJn4x6DXQqdAAM4,11458
35
- pydantic_ai/models/gemini.py,sha256=6tjI0aQgzW4i6qtAZzzjDLAkre-mfLywybjaFos8hbA,36199
36
- pydantic_ai/models/google.py,sha256=fgKmM_8N3Y0_R9vwGFWZ8J2zpMPUokJPLLp7w3zIC18,21212
37
- pydantic_ai/models/groq.py,sha256=rnAwTJ5AXgspqSqi2nJPlqj7sOeZ8H04XNs5cWJqKE4,17816
38
- pydantic_ai/models/instrumented.py,sha256=DvBNxgkxmMGeUQvBUJUAfRCpgXpJzQhNNe_M5TAVCbw,15679
39
- pydantic_ai/models/mistral.py,sha256=teNN6IfTTpfZxtSXFdD2M1IDrDj0_EHJt-9gDmAKLvg,29563
40
- pydantic_ai/models/openai.py,sha256=l1TZ2ZR7zmga2yexGVElbkCGnY35yHSTKU8CQvPRGjQ,44996
41
- pydantic_ai/models/test.py,sha256=Jlq-YQ9dhzENgmBMVerZpM4L-I2aPf7HH7ifIncyDlE,17010
35
+ pydantic_ai/models/function.py,sha256=xvN_oNKw0X4c16oe1l3MX2_kJtFWMOMaseMNO6eNBYI,11709
36
+ pydantic_ai/models/gemini.py,sha256=d8HY9nc-tcuWFmA5OdKsWABMTpXq68sUL6xE8zY6dzs,37383
37
+ pydantic_ai/models/google.py,sha256=AVXC3CPG1aduGXSc0XFEYnrT6LsNKfNWp-kmf1SQssg,22294
38
+ pydantic_ai/models/groq.py,sha256=lojKRdvg0p-EtZ20Z2CS4I0goq4CoGkLj3LuYHA6o-I,18497
39
+ pydantic_ai/models/instrumented.py,sha256=vVq7mS071EXS2PZ3NJ4Zgt93iQgAscFr2dyg9fAeuCE,15703
40
+ pydantic_ai/models/mistral.py,sha256=LHm3F2yVKoE1uDjEPtTPug6duHwr4A42qey2Pncqqx4,30093
41
+ pydantic_ai/models/openai.py,sha256=onyJSKCo5zj_VY22RTQnPRE0Bpxu1ojgtftveQF_VQc,49633
42
+ pydantic_ai/models/test.py,sha256=X5QVCsBAWXxw4MKet-UTGZ0FteUnCHoK3Py3ngJM2Zk,17437
42
43
  pydantic_ai/models/wrapper.py,sha256=43ntRkTF7rVBYLC-Ihdo1fkwpeveOpA_1fXe1fd3W9Y,1690
43
44
  pydantic_ai/profiles/__init__.py,sha256=uO_f1kSqrnXuO0x5U0EHTTMRYcmOiOoa-tS1OZppxBk,1426
44
45
  pydantic_ai/profiles/_json_schema.py,sha256=3ofRGnBca9WzqlUbw0C1ywhv_V7eGTmFAf2O7Bs5zgk,7199
@@ -69,8 +70,8 @@ pydantic_ai/providers/mistral.py,sha256=EIUSENjFuGzBhvbdrarUTM4VPkesIMnZrzfnEKHO
69
70
  pydantic_ai/providers/openai.py,sha256=7iGij0EaFylab7dTZAZDgXr78tr-HsZrn9EI9AkWBNQ,3091
70
71
  pydantic_ai/providers/openrouter.py,sha256=NXjNdnlXIBrBMMqbzcWQnowXOuZh4NHikXenBn5h3mc,4061
71
72
  pydantic_ai/providers/together.py,sha256=zFVSMSm5jXbpkNouvBOTjWrPmlPpCp6sQS5LMSyVjrQ,3482
72
- pydantic_ai_slim-0.2.19.dist-info/METADATA,sha256=b142ihHQIPR8cvWByEu4lxNblYCiGNz9kJAjfpmAXBw,3850
73
- pydantic_ai_slim-0.2.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
74
- pydantic_ai_slim-0.2.19.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
75
- pydantic_ai_slim-0.2.19.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
76
- pydantic_ai_slim-0.2.19.dist-info/RECORD,,
73
+ pydantic_ai_slim-0.3.0.dist-info/METADATA,sha256=GmMBkJvakRA_lUHh_jO941_uxk5JwGKgWNle0dLCAOQ,3846
74
+ pydantic_ai_slim-0.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
75
+ pydantic_ai_slim-0.3.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
76
+ pydantic_ai_slim-0.3.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
77
+ pydantic_ai_slim-0.3.0.dist-info/RECORD,,