pydantic-ai-slim 1.1.0 → 1.2.1 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of the package, as they appear in their respective registries, and is provided for informational purposes only.

--- a/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai/_agent_graph.py
@@ -710,6 +710,18 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
     __repr__ = dataclasses_no_defaults_repr
 
 
+@dataclasses.dataclass
+class SetFinalResult(AgentNode[DepsT, NodeRunEndT]):
+    """A node that immediately ends the graph run after a streaming response produced a final result."""
+
+    final_result: result.FinalResult[NodeRunEndT]
+
+    async def run(
+        self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
+    ) -> End[result.FinalResult[NodeRunEndT]]:
+        return End(self.final_result)
+
+
 def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, Any]]) -> RunContext[DepsT]:
     """Build a `RunContext` object from the current agent graph run context."""
     return RunContext[DepsT](
@@ -1123,6 +1135,7 @@ def build_agent_graph(
         UserPromptNode[DepsT],
         ModelRequestNode[DepsT],
         CallToolsNode[DepsT],
+        SetFinalResult[DepsT],
     )
     graph = Graph[GraphAgentState, GraphAgentDeps[DepsT, Any], result.FinalResult[OutputT]](
         nodes=nodes,
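
The `SetFinalResult` node added above gives `run_stream` a way to terminate the agent graph with an output it has already determined, instead of letting `CallToolsNode` derive it again. The same "terminal node" pattern can be sketched with the public pydantic_graph API; the node names and plain `str` output below are illustrative stand-ins for the agent-internal generics:

from __future__ import annotations

from dataclasses import dataclass

from pydantic_graph import BaseNode, End, Graph, GraphRunContext


@dataclass
class Finish(BaseNode[None, None, str]):
    """Like `SetFinalResult`: its only job is to end the run with a known value."""

    value: str

    async def run(self, ctx: GraphRunContext) -> End[str]:
        return End(self.value)


@dataclass
class Start(BaseNode[None, None, str]):
    async def run(self, ctx: GraphRunContext) -> Finish:
        # A real graph would do work here; we jump straight to the terminal node.
        return Finish('final result')


graph = Graph(nodes=(Start, Finish))
print(graph.run_sync(Start()).output)
#> final result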

--- a/pydantic_ai/agent/abstract.py
+++ b/pydantic_ai/agent/abstract.py
@@ -524,6 +524,14 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
             await stream.get_output(), final_result.tool_name, final_result.tool_call_id
         )
 
+        # When we get here, the `ModelRequestNode` has completed streaming after the final result was found.
+        # When running an agent with `agent.run`, we'd then move to `CallToolsNode` to execute the tool calls and
+        # find the final result.
+        # We also want to execute tool calls here (in case `agent.end_strategy == 'exhaustive'`), but
+        # we don't want to run the `CallToolsNode` logic to determine the final output, as it would be
+        # wasteful and could produce a different result (e.g. when text output is followed by tool calls).
+        # So we call `process_tool_calls` directly and then end the run with the found final result.
+
         parts: list[_messages.ModelRequestPart] = []
         async for _event in _agent_graph.process_tool_calls(
             tool_manager=graph_ctx.deps.tool_manager,
@@ -534,9 +542,13 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
             output_parts=parts,
         ):
             pass
+
+        # For backwards compatibility, append a new ModelRequest using the tool returns and retries
         if parts:
             messages.append(_messages.ModelRequest(parts))
 
+        await agent_run.next(_agent_graph.SetFinalResult(final_result))
+
         yield StreamedRunResult(
             messages,
             graph_ctx.deps.new_message_index,
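
Here `run_stream` finishes the run by handing a `SetFinalResult` node to `agent_run.next(...)` rather than letting the graph continue on its own. The same node-by-node driving is available publicly through `Agent.iter`; a minimal sketch, where the model name and prompt are placeholders:

import asyncio

from pydantic_ai import Agent
from pydantic_graph import End

agent = Agent('openai:gpt-4o')


async def main() -> None:
    async with agent.iter('What is the capital of France?') as agent_run:
        node = agent_run.next_node
        while not isinstance(node, End):
            # `run_stream` uses this same mechanism, injecting
            # `_agent_graph.SetFinalResult(final_result)` as the next node.
            node = await agent_run.next(node)
        print(agent_run.result.output)


asyncio.run(main())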

--- a/pydantic_ai/models/__init__.py
+++ b/pydantic_ai/models/__init__.py
@@ -55,6 +55,8 @@ KnownModelName = TypeAliasType(
         'anthropic:claude-3-5-sonnet-20240620',
         'anthropic:claude-3-5-sonnet-20241022',
         'anthropic:claude-3-5-sonnet-latest',
+        'anthropic:claude-haiku-4-5',
+        'anthropic:claude-haiku-4-5-20251001',
         'anthropic:claude-3-7-sonnet-20250219',
         'anthropic:claude-3-7-sonnet-latest',
         'anthropic:claude-3-haiku-20240307',
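
With these two Claude Haiku 4.5 identifiers added, the strings now satisfy `KnownModelName` for autocompletion and static type checking (other strings still work at runtime; the alias is purely a typing aid). Using them requires an Anthropic API key and, per the metadata change further down, anthropic>=0.70.0:

from pydantic_ai import Agent

agent = Agent('anthropic:claude-haiku-4-5')            # floating alias
pinned = Agent('anthropic:claude-haiku-4-5-20251001')  # pinned snapshot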

--- a/pydantic_ai/models/openai.py
+++ b/pydantic_ai/models/openai.py
@@ -600,7 +600,7 @@ class OpenAIChatModel(Model):
 
         return ModelResponse(
             parts=items,
-            usage=_map_usage(response),
+            usage=_map_usage(response, self._provider.name, self._provider.base_url, self._model_name),
             model_name=response.model,
             timestamp=timestamp,
             provider_details=vendor_details or None,
@@ -631,6 +631,7 @@ class OpenAIChatModel(Model):
             _response=peekable_response,
             _timestamp=number_to_datetime(first_chunk.created),
             _provider_name=self._provider.name,
+            _provider_url=self._provider.base_url,
         )
 
     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[chat.ChatCompletionToolParam]:
@@ -1061,7 +1062,7 @@ class OpenAIResponsesModel(Model):
 
         return ModelResponse(
             parts=items,
-            usage=_map_usage(response),
+            usage=_map_usage(response, self._provider.name, self._provider.base_url, self._model_name),
             model_name=response.model,
             provider_response_id=response.id,
             timestamp=timestamp,
@@ -1088,6 +1089,7 @@ class OpenAIResponsesModel(Model):
             _response=peekable_response,
             _timestamp=number_to_datetime(first_chunk.response.created_at),
             _provider_name=self._provider.name,
+            _provider_url=self._provider.base_url,
         )
 
     @overload
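
The pattern in the four hunks above (and in the streamed variants below) is uniform: every `_map_usage` call now receives the provider name, provider base URL, and model name, so usage can be attributed and priced per provider instead of implicitly assuming OpenAI's own endpoint. That matters for OpenAI-compatible servers; a sketch, where the local base URL and model name are illustrative:

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.openai import OpenAIProvider

# An OpenAI-compatible server (e.g. a local one): provider.name and
# provider.base_url now travel with the usage data for this model.
provider = OpenAIProvider(base_url='http://localhost:11434/v1', api_key='unused')
agent = Agent(OpenAIChatModel('llama3.2', provider=provider))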
@@ -1589,10 +1591,11 @@ class OpenAIStreamedResponse(StreamedResponse):
     _response: AsyncIterable[ChatCompletionChunk]
     _timestamp: datetime
     _provider_name: str
+    _provider_url: str
 
     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
         async for chunk in self._response:
-            self._usage += _map_usage(chunk)
+            self._usage += _map_usage(chunk, self._provider_name, self._provider_url, self._model_name)
 
             if chunk.id:  # pragma: no branch
                 self.provider_response_id = chunk.id
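
Note the accumulation style: streamed responses build up usage with `self._usage += ...`, which relies on `RequestUsage` supporting addition. Roughly, assuming field-wise summation:

from pydantic_ai.usage import RequestUsage

# Mirrors `self._usage += _map_usage(chunk, ...)` across stream chunks:
total = RequestUsage(input_tokens=120, output_tokens=8)
total += RequestUsage(output_tokens=32)
print(total.input_tokens, total.output_tokens)
#> 120 40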
@@ -1683,12 +1686,13 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
     _response: AsyncIterable[responses.ResponseStreamEvent]
     _timestamp: datetime
     _provider_name: str
+    _provider_url: str
 
     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
         async for chunk in self._response:
             # NOTE: You can inspect the builtin tools used checking the `ResponseCompletedEvent`.
             if isinstance(chunk, responses.ResponseCompletedEvent):
-                self._usage += _map_usage(chunk.response)
+                self._usage += self._map_usage(chunk.response)
 
                 raw_finish_reason = (
                     details.reason if (details := chunk.response.incomplete_details) else chunk.response.status
@@ -1708,7 +1712,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 self.provider_response_id = chunk.response.id
 
             elif isinstance(chunk, responses.ResponseFailedEvent):  # pragma: no cover
-                self._usage += _map_usage(chunk.response)
+                self._usage += self._map_usage(chunk.response)
 
             elif isinstance(chunk, responses.ResponseFunctionCallArgumentsDeltaEvent):
                 maybe_event = self._parts_manager.handle_tool_call_delta(
@@ -1722,10 +1726,10 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 pass  # there's nothing we need to do here
 
             elif isinstance(chunk, responses.ResponseIncompleteEvent):  # pragma: no cover
-                self._usage += _map_usage(chunk.response)
+                self._usage += self._map_usage(chunk.response)
 
             elif isinstance(chunk, responses.ResponseInProgressEvent):
-                self._usage += _map_usage(chunk.response)
+                self._usage += self._map_usage(chunk.response)
 
             elif isinstance(chunk, responses.ResponseOutputItemAddedEvent):
                 if isinstance(chunk.item, responses.ResponseFunctionToolCall):
@@ -1906,6 +1910,9 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 UserWarning,
             )
 
+    def _map_usage(self, response: responses.Response):
+        return _map_usage(response, self._provider_name, self._provider_url, self._model_name)
+
     @property
    def model_name(self) -> OpenAIModelName:
         """Get the model name of the response."""
@@ -1922,55 +1929,45 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
         return self._timestamp
 
 
-def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.Response) -> usage.RequestUsage:
+def _map_usage(
+    response: chat.ChatCompletion | ChatCompletionChunk | responses.Response,
+    provider: str,
+    provider_url: str,
+    model: str,
+) -> usage.RequestUsage:
     response_usage = response.usage
     if response_usage is None:
         return usage.RequestUsage()
-    elif isinstance(response_usage, responses.ResponseUsage):
-        details: dict[str, int] = {
-            key: value
-            for key, value in response_usage.model_dump(
-                exclude={'input_tokens', 'output_tokens', 'total_tokens'}
-            ).items()
-            if isinstance(value, int)
-        }
-        # Handle vLLM compatibility - some providers don't include token details
-        if getattr(response_usage, 'input_tokens_details', None) is not None:
-            cache_read_tokens = response_usage.input_tokens_details.cached_tokens
-        else:
-            cache_read_tokens = 0
+
+    usage_data = response_usage.model_dump(exclude_none=True)
+    details = {
+        k: v
+        for k, v in usage_data.items()
+        if k not in {'prompt_tokens', 'completion_tokens', 'input_tokens', 'output_tokens', 'total_tokens'}
+        if isinstance(v, int)
+    }
+    response_data = dict(model=model, usage=usage_data)
+    if isinstance(response_usage, responses.ResponseUsage):
+        api_flavor = 'responses'
 
         if getattr(response_usage, 'output_tokens_details', None) is not None:
             details['reasoning_tokens'] = response_usage.output_tokens_details.reasoning_tokens
         else:
             details['reasoning_tokens'] = 0
-
-        return usage.RequestUsage(
-            input_tokens=response_usage.input_tokens,
-            output_tokens=response_usage.output_tokens,
-            cache_read_tokens=cache_read_tokens,
-            details=details,
-        )
     else:
-        details = {
-            key: value
-            for key, value in response_usage.model_dump(
-                exclude_none=True, exclude={'prompt_tokens', 'completion_tokens', 'total_tokens'}
-            ).items()
-            if isinstance(value, int)
-        }
-        u = usage.RequestUsage(
-            input_tokens=response_usage.prompt_tokens,
-            output_tokens=response_usage.completion_tokens,
-            details=details,
-        )
+        api_flavor = 'chat'
+
         if response_usage.completion_tokens_details is not None:
             details.update(response_usage.completion_tokens_details.model_dump(exclude_none=True))
-            u.output_audio_tokens = response_usage.completion_tokens_details.audio_tokens or 0
-        if response_usage.prompt_tokens_details is not None:
-            u.input_audio_tokens = response_usage.prompt_tokens_details.audio_tokens or 0
-            u.cache_read_tokens = response_usage.prompt_tokens_details.cached_tokens or 0
-        return u
+
+    return usage.RequestUsage.extract(
+        response_data,
+        provider=provider,
+        provider_url=provider_url,
+        provider_fallback='openai',
+        api_flavor=api_flavor,
+        details=details,
+    )
 
 
 def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
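
The rewrite delegates the final assembly to `usage.RequestUsage.extract`, which lands with the genai-prices>=0.0.31 bump below: `_map_usage` now just separates integer detail fields from the primary token counts, tags the payload with an `api_flavor` ('responses' vs 'chat'), and lets `extract` resolve provider-specific field names. A hedged call with dummy data; the `extract` signature here is taken from this diff rather than from separate documentation:

from pydantic_ai import usage

# A Chat Completions-shaped usage payload (dummy numbers).
usage_data = {'prompt_tokens': 100, 'completion_tokens': 20, 'total_tokens': 120}

request_usage = usage.RequestUsage.extract(
    dict(model='gpt-4o', usage=usage_data),
    provider='openai',
    provider_url='https://api.openai.com/v1',
    provider_fallback='openai',  # fallback if the provider string isn't recognized
    api_flavor='chat',           # 'responses' for Responses API payloads
    details={},
)
print(request_usage.input_tokens, request_usage.output_tokens)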

--- a/pydantic_ai/providers/gateway.py
+++ b/pydantic_ai/providers/gateway.py
@@ -83,7 +83,7 @@ def gateway_provider(
             ' to use the Pydantic AI Gateway provider.'
         )
 
-    base_url = base_url or os.getenv('PYDANTIC_AI_GATEWAY_BASE_URL', 'http://localhost:8787/proxy')
+    base_url = base_url or os.getenv('PYDANTIC_AI_GATEWAY_BASE_URL', 'https://gateway.pydantic.dev/proxy')
     http_client = http_client or cached_async_http_client(provider=f'gateway-{upstream_provider}')
     http_client.event_hooks = {'request': [_request_hook]}
 
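The default gateway endpoint moves from a localhost development proxy to the hosted https://gateway.pydantic.dev/proxy. Anyone self-hosting can keep the old target either explicitly or through the environment variable the default falls back to; a sketch, assuming a gateway API key is already configured:

import os

from pydantic_ai.providers.gateway import gateway_provider

# Explicitly point back at a local proxy...
provider = gateway_provider('openai', base_url='http://localhost:8787/proxy')

# ...or rely on the environment variable consulted above:
os.environ['PYDANTIC_AI_GATEWAY_BASE_URL'] = 'http://localhost:8787/proxy'
provider = gateway_provider('openai')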

--- a/pydantic_ai_slim-1.1.0.dist-info/METADATA
+++ b/pydantic_ai_slim-1.2.1.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.1.0
+Version: 1.2.1
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -29,11 +29,11 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Requires-Dist: exceptiongroup; python_version < '3.11'
-Requires-Dist: genai-prices>=0.0.30
+Requires-Dist: genai-prices>=0.0.31
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.1.0
+Requires-Dist: pydantic-graph==1.2.1
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -42,7 +42,7 @@ Provides-Extra: ag-ui
 Requires-Dist: ag-ui-protocol>=0.1.8; extra == 'ag-ui'
 Requires-Dist: starlette>=0.45.3; extra == 'ag-ui'
 Provides-Extra: anthropic
-Requires-Dist: anthropic>=0.69.0; extra == 'anthropic'
+Requires-Dist: anthropic>=0.70.0; extra == 'anthropic'
 Provides-Extra: bedrock
 Requires-Dist: boto3>=1.39.0; extra == 'bedrock'
 Provides-Extra: cli
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.1.0; extra == 'evals'
+Requires-Dist: pydantic-evals==1.2.1; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq

--- a/pydantic_ai_slim-1.1.0.dist-info/RECORD
+++ b/pydantic_ai_slim-1.2.1.dist-info/RECORD
@@ -1,7 +1,7 @@
 pydantic_ai/__init__.py,sha256=IgLTfgpGwbYsT_d_2wSucOfFyIMl1GH6v-yfkNs_zrM,5149
 pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
 pydantic_ai/_a2a.py,sha256=3_pl7JW2yHdu31qLgCrdcTZTqXaJNjAwUV6zavah_w8,12159
-pydantic_ai/_agent_graph.py,sha256=D_Oo_LbPqGwkVAaEAcFFERwrg9pVkxeZ45vtTMhES8M,54230
+pydantic_ai/_agent_graph.py,sha256=beDv9Ixg3Q3LGgNK6BWyvsjjqA27gq3zHvceT1rXuBE,54688
 pydantic_ai/_cli.py,sha256=iZTCFrpJy3aUZ49nJQ5nw2INFw6gPVQd8EhB0rahVcI,14005
 pydantic_ai/_function_schema.py,sha256=UnDGh7Wh5z70pEaRujXF_hKsSibQdN2ywI6lZGz3LUo,11663
 pydantic_ai/_griffe.py,sha256=BphvTL00FHxsSY56GM-bNyCOdwrpL0T3LbDQITWUK_Q,5280
@@ -32,7 +32,7 @@ pydantic_ai/settings.py,sha256=0mr6KudxKKjTG8e3nsv_8vDLxNhu_1-WvefCOzCGSYM,3565
 pydantic_ai/tools.py,sha256=dCecmJtRkF1ioqFYbfT00XGGqzGB4PPO9n6IrHCQtnc,20343
 pydantic_ai/usage.py,sha256=_xXoPIfpENghWcjBvMj0URXQV6YwHWxxZYma4WZ4vUg,15710
 pydantic_ai/agent/__init__.py,sha256=VigDqMYLKQHsNYWYy6qPkqN0yfdffqxBYEA5YyxkIBM,67111
-pydantic_ai/agent/abstract.py,sha256=69kTaR-ZMEmLJ4tD3oGQS5VuomXtNL8t5mxmPz8Ao50,54587
+pydantic_ai/agent/abstract.py,sha256=Akq1NvfzXbIEJwwvo_t-FQ6MobW_cPWSeUXffdUN7Og,55651
 pydantic_ai/agent/wrapper.py,sha256=ygwfMq24mGe3pGIK-TtPAy3cV7M8VZJW3ulEHvwNTck,10293
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/common_tools/duckduckgo.py,sha256=1ae_o3zqMGrC6KFqAmuqPwJqQgNBTisuvU2jX9KU8PI,2273
@@ -62,7 +62,7 @@ pydantic_ai/durable_exec/temporal/_toolset.py,sha256=IlPQrumm2MpZrb518ru15s0jIl8
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/aci.py,sha256=YWYLXzTQJ6hS7qfgNycA8cRl69gogGgThqEU6II7eMA,2527
 pydantic_ai/ext/langchain.py,sha256=kmbbV3Cx2BiNYEJCZMHVYQquUQD-zG2L_bwDangy0Ww,2317
-pydantic_ai/models/__init__.py,sha256=D-lOP764gxfjdaNjUujpHlvpJQuIjGfYO8ljcohQ_rE,35736
+pydantic_ai/models/__init__.py,sha256=_47Vu42NfhDkm5TeRU3s0ibVBOGZr44uyhMLzA8Gg0I,35821
 pydantic_ai/models/anthropic.py,sha256=-vW7aoPrELKnJzbooCEhMu8__jY6iqvWdFJbIKeQPa8,38087
 pydantic_ai/models/bedrock.py,sha256=fha8zVZgDFYgDqO5nvBkZ2CEv4GV92yq_YnK4qmD73E,33639
 pydantic_ai/models/cohere.py,sha256=_ccK7XBts1OwD-RP8puU3z425SZ4PeJGts1WFhPjikg,14051
@@ -75,7 +75,7 @@ pydantic_ai/models/huggingface.py,sha256=711C0ysjLYKriGfSxPiaF6lqjGcNmIaJaCvAXou
 pydantic_ai/models/instrumented.py,sha256=J8eVTutr3UP1r_wd5sM5c0BIdzkRqT-EGgd2NiF0ssQ,22319
 pydantic_ai/models/mcp_sampling.py,sha256=qY4y4nXbRpNp2QbkfjzWLvF_8KLZGXypz4cc0lYRHXU,3553
 pydantic_ai/models/mistral.py,sha256=fi57hADjYxZw8wEpAcNI6mqY32VG9hHK9GGRQ-9vlZg,33905
-pydantic_ai/models/openai.py,sha256=nCqFy2sRihygbBbTYSbMy_W9LiXwNoRzIUNHSJk5ctc,99861
+pydantic_ai/models/openai.py,sha256=1DRmsFx2beuxH5RAy2o_PRMAtxAmF2Gba-EmqyK_9BM,99457
 pydantic_ai/models/test.py,sha256=5ER66nwZG7Iwm-KkzPo4vwNd3rulzgkpgysu4YcT1W4,20568
 pydantic_ai/models/wrapper.py,sha256=nwh8Gea59blbr1JDKlUnkYICuI9TUubC4qP7iZRRW28,2440
 pydantic_ai/profiles/__init__.py,sha256=UHknN-CYsQexUaxfsgz_J_uSZ9QwistLSuAErQkvbcM,3385
@@ -100,7 +100,7 @@ pydantic_ai/providers/cerebras.py,sha256=3rIu092TYYuI5S4mlRjWxay5uomPbEDyHWIBMfr
 pydantic_ai/providers/cohere.py,sha256=L3wgvcbxRRPrIKoZka_DQl1Uvi1VxBPMJikrzJ85iHE,2839
 pydantic_ai/providers/deepseek.py,sha256=zop0sb1XzdzSuI2dCNXrinfMdxoqB8H_rp2zw6ItbKc,3023
 pydantic_ai/providers/fireworks.py,sha256=t4PznbxnD9GnzZ3wYqSn6xdxRRJlYzNKf_EZzX0UWl8,3585
-pydantic_ai/providers/gateway.py,sha256=Xgns651ndehQ3F6PkMw8CBgaFBmxQx25LjrQA8PuPjk,6937
+pydantic_ai/providers/gateway.py,sha256=QEpTLOgUCoDiL8ZOm5JZtGMm7t7Jfluk_nTd6a8LfbA,6944
 pydantic_ai/providers/github.py,sha256=yi7c16_Ao1E1QmehVfdsO9NrjDGK1moaHTK-P5cIrsI,4369
 pydantic_ai/providers/google.py,sha256=scCHek7whNEbi742hnRlKItboYOoxtYosgNN7wDjvpM,6019
 pydantic_ai/providers/google_gla.py,sha256=PnmnzgCOPJB1kMVnNVqZu2Cdzk7K9jx2z0MpbJ6EkII,1951
@@ -130,8 +130,8 @@ pydantic_ai/toolsets/prefixed.py,sha256=0KwcDkW8OM36ZUsOLVP5h-Nj2tPq78L3_E2c-1Fb
 pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
 pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
 pydantic_ai/toolsets/wrapper.py,sha256=KRzF1p8dncHbva8CE6Ud-IC5E_aygIHlwH5atXK55k4,1673
-pydantic_ai_slim-1.1.0.dist-info/METADATA,sha256=T4LppdZllkCLfqapnpsV7PxT7tyntYq4wCc-UDV8b9A,4703
-pydantic_ai_slim-1.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydantic_ai_slim-1.1.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
-pydantic_ai_slim-1.1.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
-pydantic_ai_slim-1.1.0.dist-info/RECORD,,
+pydantic_ai_slim-1.2.1.dist-info/METADATA,sha256=ToerQFMS6lH9tRpFuCCdu3IPnjTb7RqVYp6DUk1XF8k,4703
+pydantic_ai_slim-1.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-1.2.1.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-1.2.1.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-1.2.1.dist-info/RECORD,,