pydantic-ai 0.0.30__tar.gz → 0.0.32__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (63)
  1. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/PKG-INFO +4 -4
  2. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/pyproject.toml +12 -4
  3. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_instrumented.py +49 -3
  4. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_agent.py +36 -0
  5. pydantic_ai-0.0.32/tests/test_logfire.py +193 -0
  6. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_streaming.py +4 -4
  7. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_utils.py +24 -0
  8. pydantic_ai-0.0.30/tests/test_logfire.py +0 -261
  9. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/.gitignore +0 -0
  10. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/LICENSE +0 -0
  11. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/Makefile +0 -0
  12. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/README.md +0 -0
  13. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/__init__.py +0 -0
  14. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/assets/kiwi.png +0 -0
  15. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/assets/marcelo.mp3 +0 -0
  16. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/conftest.py +0 -0
  17. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/example_modules/README.md +0 -0
  18. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/example_modules/bank_database.py +0 -0
  19. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/example_modules/fake_database.py +0 -0
  20. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/example_modules/weather_service.py +0 -0
  21. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/graph/__init__.py +0 -0
  22. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/graph/test_graph.py +0 -0
  23. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/graph/test_history.py +0 -0
  24. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/graph/test_mermaid.py +0 -0
  25. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/graph/test_state.py +0 -0
  26. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/graph/test_utils.py +0 -0
  27. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/import_examples.py +0 -0
  28. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/json_body_serializer.py +0 -0
  29. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/__init__.py +0 -0
  30. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
  31. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
  32. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
  33. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
  34. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
  35. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
  36. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
  37. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
  38. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
  39. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
  40. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
  41. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/mock_async_stream.py +0 -0
  42. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_anthropic.py +0 -0
  43. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_cohere.py +0 -0
  44. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_fallback.py +0 -0
  45. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_gemini.py +0 -0
  46. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_groq.py +0 -0
  47. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_mistral.py +0 -0
  48. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_model.py +0 -0
  49. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_model_function.py +0 -0
  50. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_model_names.py +0 -0
  51. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_model_test.py +0 -0
  52. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_openai.py +0 -0
  53. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/models/test_vertexai.py +0 -0
  54. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_deps.py +0 -0
  55. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_examples.py +0 -0
  56. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_format_as_xml.py +0 -0
  57. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_json_body_serializer.py +0 -0
  58. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_live.py +0 -0
  59. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_parts_manager.py +0 -0
  60. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_tools.py +0 -0
  61. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/test_usage_limits.py +0 -0
  62. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/typed_agent.py +0 -0
  63. {pydantic_ai-0.0.30 → pydantic_ai-0.0.32}/tests/typed_graph.py +0 -0

--- pydantic_ai-0.0.30/PKG-INFO
+++ pydantic_ai-0.0.32/PKG-INFO
@@ -1,12 +1,12 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.0.30
+Version: 0.0.32
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
 Project-URL: Documentation, https://ai.pydantic.dev
 Project-URL: Changelog, https://github.com/pydantic/pydantic-ai/releases
-Author-email: Samuel Colvin <samuel@pydantic.dev>
+Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
 License-File: LICENSE
 Classifier: Development Status :: 4 - Beta
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.30
+Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.32
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.0.30; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.0.32; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=2.3; extra == 'logfire'
 Description-Content-Type: text/markdown

--- pydantic_ai-0.0.30/pyproject.toml
+++ pydantic_ai-0.0.32/pyproject.toml
@@ -4,9 +4,14 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai"
-version = "0.0.30"
+version = "0.0.32"
 description = "Agent Framework / shim to use Pydantic with LLMs"
-authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
+authors = [
+    { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
+    { name = "Marcelo Trylesinski", email = "marcelotryle@gmail.com" },
+    { name = "David Montague", email = "david@pydantic.dev" },
+    { name = "Alex Hall", email = "alex@pydantic.dev" },
+]
 license = "MIT"
 readme = "README.md"
 classifiers = [
@@ -32,7 +37,7 @@ classifiers = [
 requires-python = ">=3.9"
 
 dependencies = [
-    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.30",
+    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.32",
 ]
 
 [project.urls]
@@ -42,7 +47,7 @@ Documentation = "https://ai.pydantic.dev"
 Changelog = "https://github.com/pydantic/pydantic-ai/releases"
 
 [project.optional-dependencies]
-examples = ["pydantic-ai-examples==0.0.30"]
+examples = ["pydantic-ai-examples==0.0.32"]
 logfire = ["logfire>=2.3"]
 
 [tool.uv.sources]
@@ -165,6 +170,9 @@ exclude_lines = [
 [tool.logfire]
 ignore_no_config = true
 
+[tool.inline-snapshot]
+format-command="ruff format --stdin-filename {filename}"
+
 [tool.inline-snapshot.shortcuts]
 snap-fix = ["create", "fix"]
 snap = ["create"]

--- pydantic_ai-0.0.30/tests/models/test_instrumented.py
+++ pydantic_ai-0.0.32/tests/models/test_instrumented.py
@@ -106,7 +106,7 @@ class MyResponseStream(StreamedResponse):
 @pytest.mark.anyio
 @requires_logfire_events
 async def test_instrumented_model(capfire: CaptureLogfire):
-    model = InstrumentedModel.from_logfire(MyModel(), event_mode='logs')
+    model = InstrumentedModel(MyModel(), event_mode='logs')
     assert model.system == 'my_system'
     assert model.model_name == 'my_model'
 
@@ -168,6 +168,7 @@ async def test_instrumented_model(capfire: CaptureLogfire):
                 'severity_text': None,
                 'attributes': {
                     'gen_ai.system': 'my_system',
+                    'gen_ai.message.index': 0,
                     'event.name': 'gen_ai.system.message',
                 },
                 'timestamp': 2000000000,
@@ -182,6 +183,7 @@ async def test_instrumented_model(capfire: CaptureLogfire):
                 'severity_text': None,
                 'attributes': {
                     'gen_ai.system': 'my_system',
+                    'gen_ai.message.index': 0,
                     'event.name': 'gen_ai.user.message',
                 },
                 'timestamp': 4000000000,
@@ -196,6 +198,7 @@ async def test_instrumented_model(capfire: CaptureLogfire):
                 'severity_text': None,
                 'attributes': {
                     'gen_ai.system': 'my_system',
+                    'gen_ai.message.index': 0,
                     'event.name': 'gen_ai.tool.message',
                 },
                 'timestamp': 6000000000,
@@ -218,6 +221,7 @@ Fix the errors and try again.\
                 'severity_text': None,
                 'attributes': {
                     'gen_ai.system': 'my_system',
+                    'gen_ai.message.index': 0,
                     'event.name': 'gen_ai.tool.message',
                 },
                 'timestamp': 8000000000,
@@ -239,6 +243,7 @@ Fix the errors and try again.\
                 'severity_text': None,
                 'attributes': {
                     'gen_ai.system': 'my_system',
+                    'gen_ai.message.index': 0,
                     'event.name': 'gen_ai.user.message',
                 },
                 'timestamp': 10000000000,
@@ -253,6 +258,7 @@ Fix the errors and try again.\
                 'severity_text': None,
                 'attributes': {
                     'gen_ai.system': 'my_system',
+                    'gen_ai.message.index': 1,
                     'event.name': 'gen_ai.assistant.message',
                 },
                 'timestamp': 12000000000,
@@ -324,7 +330,7 @@ async def test_instrumented_model_not_recording():
 @pytest.mark.anyio
 @requires_logfire_events
 async def test_instrumented_model_stream(capfire: CaptureLogfire):
-    model = InstrumentedModel.from_logfire(MyModel(), event_mode='logs')
+    model = InstrumentedModel(MyModel(), event_mode='logs')
 
     messages: list[ModelMessage] = [
         ModelRequest(
@@ -380,6 +386,7 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire):
                 'severity_text': None,
                 'attributes': {
                     'gen_ai.system': 'my_system',
+                    'gen_ai.message.index': 0,
                     'event.name': 'gen_ai.user.message',
                 },
                 'timestamp': 2000000000,
@@ -406,7 +413,7 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire):
 @pytest.mark.anyio
 @requires_logfire_events
 async def test_instrumented_model_stream_break(capfire: CaptureLogfire):
-    model = InstrumentedModel.from_logfire(MyModel(), event_mode='logs')
+    model = InstrumentedModel(MyModel(), event_mode='logs')
 
     messages: list[ModelMessage] = [
         ModelRequest(
@@ -474,6 +481,7 @@ async def test_instrumented_model_stream_break(capfire: CaptureLogfire):
                 'severity_text': None,
                 'attributes': {
                     'gen_ai.system': 'my_system',
+                    'gen_ai.message.index': 0,
                     'event.name': 'gen_ai.user.message',
                 },
                 'timestamp': 2000000000,
@@ -555,12 +563,14 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire):
                'event.name': 'gen_ai.system.message',
                'content': 'system_prompt',
                'role': 'system',
+               'gen_ai.message.index': 0,
                'gen_ai.system': 'my_system',
            },
            {
                'event.name': 'gen_ai.user.message',
                'content': 'user_prompt',
                'role': 'user',
+               'gen_ai.message.index': 0,
                'gen_ai.system': 'my_system',
            },
            {
@@ -568,6 +578,7 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire):
                'content': 'tool_return_content',
                'role': 'tool',
                'id': 'tool_call_3',
+               'gen_ai.message.index': 0,
                'gen_ai.system': 'my_system',
            },
            {
@@ -579,6 +590,7 @@ Fix the errors and try again.\
 """,
                'role': 'tool',
                'id': 'tool_call_4',
+               'gen_ai.message.index': 0,
                'gen_ai.system': 'my_system',
            },
            {
@@ -589,12 +601,14 @@ retry_prompt2
 Fix the errors and try again.\
 """,
                'role': 'user',
+               'gen_ai.message.index': 0,
                'gen_ai.system': 'my_system',
            },
            {
                'event.name': 'gen_ai.assistant.message',
                'role': 'assistant',
                'content': 'text3',
+               'gen_ai.message.index': 1,
                'gen_ai.system': 'my_system',
            },
            {
@@ -632,3 +646,35 @@ Fix the errors and try again.\
            },
        ]
    )
+
+
+def test_messages_to_otel_events_serialization_errors():
+    class Foo:
+        def __repr__(self):
+            return 'Foo()'
+
+    class Bar:
+        def __repr__(self):
+            raise ValueError('error!')
+
+    messages = [
+        ModelResponse(parts=[ToolCallPart('tool', {'arg': Foo()})]),
+        ModelRequest(parts=[ToolReturnPart('tool', Bar())]),
+    ]
+
+    assert [
+        InstrumentedModel.event_to_dict(e) for e in InstrumentedModel.messages_to_otel_events(messages)
+    ] == snapshot(
+        [
+            {
+                'body': "{'role': 'assistant', 'tool_calls': [{'id': None, 'type': 'function', 'function': {'name': 'tool', 'arguments': {'arg': Foo()}}}]}",
+                'gen_ai.message.index': 0,
+                'event.name': 'gen_ai.assistant.message',
+            },
+            {
+                'body': 'Unable to serialize: error!',
+                'gen_ai.message.index': 1,
+                'event.name': 'gen_ai.tool.message',
+            },
+        ]
+    )
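
Note: the changes above replace the `InstrumentedModel.from_logfire(...)` classmethod with direct construction, and the new `test_messages_to_otel_events_serialization_errors` calls `InstrumentedModel.messages_to_otel_events` / `event_to_dict` directly. A minimal migration sketch, based only on the test lines in this diff (`MyModel` is the stub model defined in this test module; the import path is an assumption):

    from pydantic_ai.models.instrumented import InstrumentedModel  # assumed import path

    # 0.0.30: model = InstrumentedModel.from_logfire(MyModel(), event_mode='logs')
    # 0.0.32:
    model = InstrumentedModel(MyModel(), event_mode='logs')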

--- pydantic_ai-0.0.30/tests/test_agent.py
+++ pydantic_ai-0.0.32/tests/test_agent.py
@@ -1183,6 +1183,42 @@ class TestMultipleToolCalls:
         tool_returns = [m for m in result.all_messages() if isinstance(m, ToolReturnPart)]
         assert tool_returns == snapshot([])
 
+    def test_multiple_final_result_are_validated_correctly(self):
+        """Tests that if multiple final results are returned, but one fails validation, the other is used."""
+
+        def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
+            assert info.result_tools is not None
+            return ModelResponse(
+                parts=[
+                    ToolCallPart('final_result', {'bad_value': 'first'}, tool_call_id='first'),
+                    ToolCallPart('final_result', {'value': 'second'}, tool_call_id='second'),
+                ]
+            )
+
+        agent = Agent(FunctionModel(return_model), result_type=self.ResultType, end_strategy='early')
+        result = agent.run_sync('test multiple final results')
+
+        # Verify the result came from the second final tool
+        assert result.data.value == 'second'
+
+        # Verify we got appropriate tool returns
+        assert result.new_messages()[-1].parts == snapshot(
+            [
+                ToolReturnPart(
+                    tool_name='final_result',
+                    tool_call_id='first',
+                    content='Result tool not used - result failed validation.',
+                    timestamp=IsNow(tz=timezone.utc),
+                ),
+                ToolReturnPart(
+                    tool_name='final_result',
+                    content='Final result processed.',
+                    timestamp=IsNow(tz=timezone.utc),
+                    tool_call_id='second',
+                ),
+            ]
+        )
+
 
 async def test_model_settings_override() -> None:
     def return_settings(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
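
Note: the new test pins down result-tool fallback behavior: when the model returns two `final_result` calls and the first fails schema validation, that call is answered with 'Result tool not used - result failed validation.' and the second becomes the run result. A condensed, self-contained sketch of the same behavior (assuming `ResultType` is a simple model with a required `value: str` field, which the test's arguments imply):

    from pydantic import BaseModel

    from pydantic_ai import Agent
    from pydantic_ai.messages import ModelMessage, ModelResponse, ToolCallPart
    from pydantic_ai.models.function import AgentInfo, FunctionModel


    class ResultType(BaseModel):
        value: str


    def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
        # The first call lacks the required 'value' field, so validation fails;
        # the second call is then used as the final result.
        return ModelResponse(
            parts=[
                ToolCallPart('final_result', {'bad_value': 'first'}, tool_call_id='first'),
                ToolCallPart('final_result', {'value': 'second'}, tool_call_id='second'),
            ]
        )


    agent = Agent(FunctionModel(return_model), result_type=ResultType, end_strategy='early')
    assert agent.run_sync('test').data.value == 'second'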

--- /dev/null
+++ pydantic_ai-0.0.32/tests/test_logfire.py
@@ -0,0 +1,193 @@
+from __future__ import annotations as _annotations
+
+from dataclasses import dataclass
+from typing import Any, Callable
+
+import pytest
+from dirty_equals import IsJson
+from inline_snapshot import snapshot
+from typing_extensions import NotRequired, TypedDict
+
+from pydantic_ai import Agent
+from pydantic_ai.models.test import TestModel
+
+try:
+    from logfire.testing import CaptureLogfire
+except ImportError:
+    logfire_installed = False
+else:
+    logfire_installed = True
+
+
+class SpanSummary(TypedDict):
+    id: int
+    message: str
+    children: NotRequired[list[SpanSummary]]
+
+
+@dataclass(init=False)
+class LogfireSummary:
+    traces: list[SpanSummary]
+    attributes: dict[int, dict[str, Any]]
+
+    def __init__(self, capfire: CaptureLogfire):
+        spans = capfire.exporter.exported_spans_as_dict()
+        spans.sort(key=lambda s: s['start_time'])
+        self.traces = []
+        span_lookup: dict[tuple[str, str], SpanSummary] = {}
+        self.attributes = {}
+        id_counter = 0
+        for span in spans:
+            tid = span['context']['trace_id'], span['context']['span_id']
+            span_lookup[tid] = span_summary = SpanSummary(id=id_counter, message=span['attributes']['logfire.msg'])
+            self.attributes[id_counter] = span['attributes']
+            id_counter += 1
+            if parent := span['parent']:
+                parent_span = span_lookup[(parent['trace_id'], parent['span_id'])]
+                parent_span.setdefault('children', []).append(span_summary)
+            else:
+                self.traces.append(span_summary)
+
+
+@pytest.fixture
+def get_logfire_summary(capfire: CaptureLogfire) -> Callable[[], LogfireSummary]:
+    def get_summary() -> LogfireSummary:
+        return LogfireSummary(capfire)
+
+    return get_summary
+
+
+@pytest.mark.skipif(not logfire_installed, reason='logfire not installed')
+def test_logfire(get_logfire_summary: Callable[[], LogfireSummary]) -> None:
+    my_agent = Agent(model=TestModel(), instrument=True)
+
+    @my_agent.tool_plain
+    async def my_ret(x: int) -> str:
+        return str(x + 1)
+
+    result = my_agent.run_sync('Hello')
+    assert result.data == snapshot('{"my_ret":"1"}')
+
+    summary = get_logfire_summary()
+    assert summary.traces == snapshot(
+        [
+            {
+                'id': 0,
+                'message': 'my_agent run',
+                'children': [
+                    {'id': 1, 'message': 'preparing model request params'},
+                    {'id': 2, 'message': 'chat test'},
+                    {'id': 3, 'message': 'running tools: my_ret'},
+                    {'id': 4, 'message': 'preparing model request params'},
+                    {'id': 5, 'message': 'chat test'},
+                ],
+            }
+        ]
+    )
+    assert summary.attributes[0] == snapshot(
+        {
+            'model_name': 'test',
+            'agent_name': 'my_agent',
+            'logfire.msg': 'my_agent run',
+            'logfire.span_type': 'span',
+            'gen_ai.usage.input_tokens': 103,
+            'gen_ai.usage.output_tokens': 12,
+            'all_messages_events': IsJson(
+                snapshot(
+                    [
+                        {
+                            'content': 'Hello',
+                            'role': 'user',
+                            'gen_ai.message.index': 0,
+                            'event.name': 'gen_ai.user.message',
+                        },
+                        {
+                            'role': 'assistant',
+                            'tool_calls': [
+                                {
+                                    'id': None,
+                                    'type': 'function',
+                                    'function': {
+                                        'name': 'my_ret',
+                                        'arguments': {'x': 0},
+                                    },
+                                }
+                            ],
+                            'gen_ai.message.index': 1,
+                            'event.name': 'gen_ai.assistant.message',
+                        },
+                        {
+                            'content': '1',
+                            'role': 'tool',
+                            'id': None,
+                            'gen_ai.message.index': 2,
+                            'event.name': 'gen_ai.tool.message',
+                        },
+                        {
+                            'role': 'assistant',
+                            'content': '{"my_ret":"1"}',
+                            'gen_ai.message.index': 3,
+                            'event.name': 'gen_ai.assistant.message',
+                        },
+                    ]
+                )
+            ),
+            'final_result': '{"my_ret":"1"}',
+            'logfire.json_schema': IsJson(
+                snapshot(
+                    {
+                        'type': 'object',
+                        'properties': {'all_messages_events': {'type': 'array'}, 'final_result': {'type': 'object'}},
+                    }
+                )
+            ),
+        }
+    )
+    assert summary.attributes[1] == snapshot(
+        {
+            'run_step': 1,
+            'logfire.span_type': 'span',
+            'logfire.msg': 'preparing model request params',
+        }
+    )
+    assert summary.attributes[2] == snapshot(
+        {
+            'gen_ai.operation.name': 'chat',
+            'gen_ai.system': 'test',
+            'gen_ai.request.model': 'test',
+            'logfire.span_type': 'span',
+            'logfire.msg': 'chat test',
+            'gen_ai.response.model': 'test',
+            'gen_ai.usage.input_tokens': 51,
+            'gen_ai.usage.output_tokens': 4,
+            'events': IsJson(
+                snapshot(
+                    [
+                        {
+                            'event.name': 'gen_ai.user.message',
+                            'content': 'Hello',
+                            'role': 'user',
+                            'gen_ai.message.index': 0,
+                            'gen_ai.system': 'test',
+                        },
+                        {
+                            'event.name': 'gen_ai.choice',
+                            'index': 0,
+                            'message': {
+                                'role': 'assistant',
+                                'tool_calls': [
+                                    {
+                                        'id': None,
+                                        'type': 'function',
+                                        'function': {'name': 'my_ret', 'arguments': {'x': 0}},
+                                    }
+                                ],
+                            },
+                            'gen_ai.system': 'test',
+                        },
+                    ]
+                )
+            ),
+            'logfire.json_schema': '{"type": "object", "properties": {"events": {"type": "array"}}}',
+        }
+    )
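
Note: compared with the deleted 0.0.30 version of this test (at the end of this diff), the agent is now constructed with `instrument=True`, so span and event emission appears to be opt-in per agent in 0.0.32, and the asserted attributes follow the `gen_ai.*` OpenTelemetry conventions rather than logfire-specific message templates. A minimal usage sketch taken directly from the test above:

    from pydantic_ai import Agent
    from pydantic_ai.models.test import TestModel

    # instrument=True enables the spans and gen_ai.* events asserted in test_logfire
    agent = Agent(model=TestModel(), instrument=True)
    result = agent.run_sync('Hello')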

--- pydantic_ai-0.0.30/tests/test_streaming.py
+++ pydantic_ai-0.0.32/tests/test_streaming.py
@@ -761,14 +761,14 @@ async def test_iter_stream_output():
     messages: list[str] = []
 
     stream_usage: Usage | None = None
-    with agent.iter('Hello') as run:
+    async with agent.iter('Hello') as run:
         async for node in run:
             if agent.is_model_request_node(node):
                 async with node.stream(run.ctx) as stream:
                     async for chunk in stream.stream_output(debounce_by=None):
                         messages.append(chunk)
                         stream_usage = deepcopy(stream.usage())
-    assert run.next_node == End(data=FinalResult(data='The bat sat on the mat.', tool_name=None))
+    assert run.next_node == End(data=FinalResult(data='The bat sat on the mat.', tool_name=None, tool_call_id=None))
     assert (
         run.usage()
         == stream_usage
@@ -800,7 +800,7 @@ async def test_iter_stream_responses():
     run: AgentRun
     stream: AgentStream
     messages: list[ModelResponse] = []
-    with agent.iter('Hello') as run:
+    async with agent.iter('Hello') as run:
         async for node in run:
             if agent.is_model_request_node(node):
                 async with node.stream(run.ctx) as stream:
@@ -843,7 +843,7 @@ async def test_stream_iter_structured_validator() -> None:
         return ResultType(value=data.value + ' (validated)')
 
     outputs: list[ResultType] = []
-    with agent.iter('test') as run:
+    async with agent.iter('test') as run:
         async for node in run:
             if agent.is_model_request_node(node):
                 async with node.stream(run.ctx) as stream:
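
Note: all three streaming tests switch from `with agent.iter(...)` to `async with agent.iter(...)`, so `Agent.iter` is used as an async context manager in 0.0.32, and `FinalResult` now carries a `tool_call_id` field. A usage sketch mirroring the updated test code:

    async def print_stream(agent) -> None:
        async with agent.iter('Hello') as run:  # async context manager as of 0.0.32
            async for node in run:
                if agent.is_model_request_node(node):
                    async with node.stream(run.ctx) as stream:
                        async for chunk in stream.stream_output(debounce_by=None):
                            print(chunk)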

--- pydantic_ai-0.0.30/tests/test_utils.py
+++ pydantic_ai-0.0.32/tests/test_utils.py
@@ -44,6 +44,30 @@ def test_check_object_json_schema():
     object_schema = {'type': 'object', 'properties': {'a': {'type': 'string'}}}
     assert check_object_json_schema(object_schema) == object_schema
 
+    ref_schema = {
+        '$defs': {
+            'JsonModel': {
+                'properties': {
+                    'type': {'title': 'Type', 'type': 'string'},
+                    'items': {'anyOf': [{'$ref': '#/$defs/JsonModel'}, {'type': 'null'}]},
+                },
+                'required': ['type', 'items'],
+                'title': 'JsonModel',
+                'type': 'object',
+            }
+        },
+        '$ref': '#/$defs/JsonModel',
+    }
+    assert check_object_json_schema(ref_schema) == {
+        'properties': {
+            'type': {'title': 'Type', 'type': 'string'},
+            'items': {'anyOf': [{'$ref': '#/$defs/JsonModel'}, {'type': 'null'}]},
+        },
+        'required': ['type', 'items'],
+        'title': 'JsonModel',
+        'type': 'object',
+    }
+
     array_schema = {'type': 'array', 'items': {'type': 'string'}}
     with pytest.raises(UserError, match='^Schema must be an object$'):
         check_object_json_schema(array_schema)
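
Note: the new assertions show that `check_object_json_schema` now unwraps a top-level `{'$defs': ..., '$ref': '#/$defs/Name'}` wrapper into the referenced object schema, leaving nested recursive `$ref`s untouched, exactly as the expected output above does. A hypothetical standalone equivalent of just that unwrapping step:

    from typing import Any


    def unwrap_top_level_ref(schema: dict[str, Any]) -> dict[str, Any]:
        # '#/$defs/JsonModel' -> schema['$defs']['JsonModel']
        ref = schema.get('$ref', '')
        prefix = '#/$defs/'
        if ref.startswith(prefix):
            return schema['$defs'][ref[len(prefix) :]]
        return schema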

--- pydantic_ai-0.0.30/tests/test_logfire.py
+++ /dev/null
@@ -1,261 +0,0 @@
-from __future__ import annotations as _annotations
-
-from dataclasses import dataclass
-from typing import Any, Callable
-
-import pytest
-from dirty_equals import IsInt, IsJson, IsStr
-from inline_snapshot import snapshot
-from typing_extensions import NotRequired, TypedDict
-
-from pydantic_ai import Agent
-from pydantic_ai.models.test import TestModel
-
-try:
-    from logfire.testing import CaptureLogfire
-except ImportError:
-    logfire_installed = False
-else:
-    logfire_installed = True
-
-
-class SpanSummary(TypedDict):
-    id: int
-    message: str
-    children: NotRequired[list[SpanSummary]]
-
-
-@dataclass(init=False)
-class LogfireSummary:
-    traces: list[SpanSummary]
-    attributes: dict[int, dict[str, Any]]
-
-    def __init__(self, capfire: CaptureLogfire):
-        spans = capfire.exporter.exported_spans_as_dict()
-        spans.sort(key=lambda s: s['start_time'])
-        self.traces = []
-        span_lookup: dict[tuple[str, str], SpanSummary] = {}
-        self.attributes = {}
-        id_counter = 0
-        for span in spans:
-            tid = span['context']['trace_id'], span['context']['span_id']
-            span_lookup[tid] = span_summary = SpanSummary(id=id_counter, message=span['attributes']['logfire.msg'])
-            self.attributes[id_counter] = span['attributes']
-            id_counter += 1
-            if parent := span['parent']:
-                parent_span = span_lookup[(parent['trace_id'], parent['span_id'])]
-                parent_span.setdefault('children', []).append(span_summary)
-            else:
-                self.traces.append(span_summary)
-
-
-@pytest.fixture
-def get_logfire_summary(capfire: CaptureLogfire) -> Callable[[], LogfireSummary]:
-    def get_summary() -> LogfireSummary:
-        return LogfireSummary(capfire)
-
-    return get_summary
-
-
-@pytest.mark.skipif(not logfire_installed, reason='logfire not installed')
-def test_logfire(get_logfire_summary: Callable[[], LogfireSummary]) -> None:
-    my_agent = Agent(model=TestModel())
-
-    @my_agent.tool_plain
-    async def my_ret(x: int) -> str:
-        return str(x + 1)
-
-    result = my_agent.run_sync('Hello')
-    assert result.data == snapshot('{"my_ret":"1"}')
-
-    summary = get_logfire_summary()
-    assert summary.traces == snapshot(
-        [
-            {
-                'id': 0,
-                'message': 'my_agent run prompt=Hello',
-                'children': [
-                    {'id': 1, 'message': 'preparing model request params run_step=1'},
-                    {'id': 2, 'message': 'model request'},
-                    {
-                        'id': 3,
-                        'message': 'handle model response -> tool-return',
-                        'children': [{'id': 4, 'message': "running tools=['my_ret']"}],
-                    },
-                    {'id': 5, 'message': 'preparing model request params run_step=2'},
-                    {'id': 6, 'message': 'model request'},
-                    {'id': 7, 'message': 'handle model response -> final result'},
-                ],
-            }
-        ]
-    )
-    assert summary.attributes[0] == snapshot(
-        {
-            'code.filepath': 'test_logfire.py',
-            'code.function': 'test_logfire',
-            'code.lineno': 123,
-            'prompt': 'Hello',
-            'agent': IsJson(
-                {
-                    'model': {
-                        'call_tools': 'all',
-                        'custom_result_text': None,
-                        'custom_result_args': None,
-                        'seed': 0,
-                        'last_model_request_parameters': None,
-                    },
-                    'name': 'my_agent',
-                    'end_strategy': 'early',
-                    'model_settings': None,
-                }
-            ),
-            'model_name': 'test',
-            'agent_name': 'my_agent',
-            'logfire.msg_template': '{agent_name} run {prompt=}',
-            'logfire.msg': 'my_agent run prompt=Hello',
-            'logfire.span_type': 'span',
-            'all_messages': IsJson(
-                [
-                    {
-                        'parts': [
-                            {
-                                'content': 'Hello',
-                                'timestamp': IsStr(regex=r'\d{4}-\d{2}-.+'),
-                                'part_kind': 'user-prompt',
-                            },
-                        ],
-                        'kind': 'request',
-                    },
-                    {
-                        'parts': [
-                            {'tool_name': 'my_ret', 'args': {'x': 0}, 'tool_call_id': None, 'part_kind': 'tool-call'}
-                        ],
-                        'model_name': 'test',
-                        'timestamp': IsStr(regex=r'\d{4}-\d{2}-.+'),
-                        'kind': 'response',
-                    },
-                    {
-                        'parts': [
-                            {
-                                'tool_name': 'my_ret',
-                                'content': '1',
-                                'tool_call_id': None,
-                                'timestamp': IsStr(regex=r'\d{4}-\d{2}-.+'),
-                                'part_kind': 'tool-return',
-                            },
-                        ],
-                        'kind': 'request',
-                    },
-                    {
-                        'parts': [{'content': '{"my_ret":"1"}', 'part_kind': 'text'}],
-                        'model_name': 'test',
-                        'timestamp': IsStr(regex=r'\d{4}-\d{2}-.+'),
-                        'kind': 'response',
-                    },
-                ]
-            ),
-            'usage': IsJson(
-                {'requests': 2, 'request_tokens': 103, 'response_tokens': 12, 'total_tokens': 115, 'details': None}
-            ),
-            'logfire.json_schema': IsJson(
-                {
-                    'type': 'object',
-                    'properties': {
-                        'prompt': {},
-                        'agent': {
-                            'type': 'object',
-                            'title': 'Agent',
-                            'x-python-datatype': 'dataclass',
-                            'properties': {
-                                'model': {'type': 'object', 'title': 'TestModel', 'x-python-datatype': 'dataclass'}
-                            },
-                        },
-                        'model_name': {},
-                        'agent_name': {},
-                        'all_messages': {
-                            'type': 'array',
-                            'prefixItems': [
-                                {
-                                    'type': 'object',
-                                    'title': 'ModelRequest',
-                                    'x-python-datatype': 'dataclass',
-                                    'properties': {
-                                        'parts': {
-                                            'type': 'array',
-                                            'items': {
-                                                'type': 'object',
-                                                'title': 'UserPromptPart',
-                                                'x-python-datatype': 'dataclass',
-                                                'properties': {'timestamp': {'type': 'string', 'format': 'date-time'}},
-                                            },
-                                        }
-                                    },
-                                },
-                                {
-                                    'type': 'object',
-                                    'title': 'ModelResponse',
-                                    'x-python-datatype': 'dataclass',
-                                    'properties': {
-                                        'parts': {
-                                            'type': 'array',
-                                            'items': {
-                                                'type': 'object',
-                                                'title': 'ToolCallPart',
-                                                'x-python-datatype': 'dataclass',
-                                            },
-                                        },
-                                        'timestamp': {'type': 'string', 'format': 'date-time'},
-                                    },
-                                },
-                                {
-                                    'type': 'object',
-                                    'title': 'ModelRequest',
-                                    'x-python-datatype': 'dataclass',
-                                    'properties': {
-                                        'parts': {
-                                            'type': 'array',
-                                            'items': {
-                                                'type': 'object',
-                                                'title': 'ToolReturnPart',
-                                                'x-python-datatype': 'dataclass',
-                                                'properties': {'timestamp': {'type': 'string', 'format': 'date-time'}},
-                                            },
-                                        }
-                                    },
-                                },
-                                {
-                                    'type': 'object',
-                                    'title': 'ModelResponse',
-                                    'x-python-datatype': 'dataclass',
-                                    'properties': {
-                                        'parts': {
-                                            'type': 'array',
-                                            'items': {
-                                                'type': 'object',
-                                                'title': 'TextPart',
-                                                'x-python-datatype': 'dataclass',
-                                            },
-                                        },
-                                        'timestamp': {'type': 'string', 'format': 'date-time'},
-                                    },
-                                },
-                            ],
-                        },
-                        'usage': {'type': 'object', 'title': 'Usage', 'x-python-datatype': 'dataclass'},
-                    },
-                }
-            ),
-        }
-    )
-    assert summary.attributes[1] == snapshot(
-        {
-            'code.filepath': 'test_logfire.py',
-            'code.function': 'test_logfire',
-            'code.lineno': IsInt(),
-            'run_step': 1,
-            'logfire.msg_template': 'preparing model request params {run_step=}',
-            'logfire.span_type': 'span',
-            'logfire.msg': 'preparing model request params run_step=1',
-            'logfire.json_schema': '{"type":"object","properties":{"run_step":{}}}',
-        }
-    )