pydantic-ai 0.0.33__tar.gz → 0.0.34__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-ai might be problematic.
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/PKG-INFO +3 -3
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/pyproject.toml +5 -5
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_instrumented.py +15 -16
- pydantic_ai-0.0.34/tests/test_cli.py +52 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_logfire.py +82 -32
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/.gitignore +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/LICENSE +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/Makefile +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/README.md +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/__init__.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/assets/kiwi.png +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/assets/marcelo.mp3 +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/conftest.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/example_modules/README.md +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/example_modules/bank_database.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/example_modules/fake_database.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/example_modules/weather_service.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/graph/__init__.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/graph/test_graph.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/graph/test_history.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/graph/test_mermaid.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/graph/test_state.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/graph/test_utils.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/import_examples.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/json_body_serializer.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/__init__.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/mock_async_stream.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_anthropic.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_cohere.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_fallback.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_gemini.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_groq.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_mistral.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_model.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_model_function.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_model_names.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_model_test.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_openai.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_vertexai.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/providers/__init__.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/providers/test_deepseek.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/providers/test_google_gla.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/providers/test_google_vertex.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/providers/test_provider_names.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_agent.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_deps.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_examples.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_format_as_xml.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_json_body_serializer.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_live.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_parts_manager.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_streaming.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_tools.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_usage_limits.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_utils.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/typed_agent.py +0 -0
- {pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/typed_graph.py +0 -0
{pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/PKG-INFO +3 -3

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.0.33
+Version: 0.0.34
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.33
+Requires-Dist: pydantic-ai-slim[anthropic,cli,cohere,groq,mistral,openai,vertexai]==0.0.34
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.0.33; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.0.34; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=2.3; extra == 'logfire'
 Description-Content-Type: text/markdown
{pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/pyproject.toml +5 -5

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai"
-version = "0.0.33"
+version = "0.0.34"
 description = "Agent Framework / shim to use Pydantic with LLMs"
 authors = [
     { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
@@ -35,9 +35,8 @@ classifiers = [
     "Framework :: Pydantic :: 2",
 ]
 requires-python = ">=3.9"
-
 dependencies = [
-    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.33",
+    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,cli]==0.0.34",
 ]
 
 [project.urls]
@@ -47,7 +46,7 @@ Documentation = "https://ai.pydantic.dev"
 Changelog = "https://github.com/pydantic/pydantic-ai/releases"
 
 [project.optional-dependencies]
-examples = ["pydantic-ai-examples==0.0.33"]
+examples = ["pydantic-ai-examples==0.0.34"]
 logfire = ["logfire>=2.3"]
 
 [tool.uv.sources]
@@ -165,13 +164,14 @@ exclude_lines = [
     'typing.assert_never',
     '$\s*assert_never\(',
     'if __name__ == .__main__.:',
+    'except ImportError as _import_error:',
 ]
 
 [tool.logfire]
 ignore_no_config = true
 
 [tool.inline-snapshot]
-format-command="ruff format --stdin-filename {filename}"
+format-command = "ruff format --stdin-filename {filename}"
 
 [tool.inline-snapshot.shortcuts]
 snap-fix = ["create", "fix"]
{pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/models/test_instrumented.py +15 -16

@@ -8,6 +8,8 @@ import pytest
 from dirty_equals import IsJson
 from inline_snapshot import snapshot
 from logfire_api import DEFAULT_LOGFIRE_INSTANCE
+from opentelemetry._events import NoOpEventLoggerProvider
+from opentelemetry.trace import NoOpTracerProvider
 
 from pydantic_ai.messages import (
     ModelMessage,
@@ -25,6 +27,7 @@ from pydantic_ai.messages import (
     UserPromptPart,
 )
 from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
+from pydantic_ai.models.instrumented import InstrumentationSettings, InstrumentedModel
 from pydantic_ai.settings import ModelSettings
 from pydantic_ai.usage import Usage
 
@@ -32,11 +35,6 @@ from ..conftest import try_import
 
 with try_import() as imports_successful:
     from logfire.testing import CaptureLogfire
-    from opentelemetry._events import NoOpEventLoggerProvider
-    from opentelemetry.trace import NoOpTracerProvider
-
-    from pydantic_ai.models.instrumented import InstrumentedModel
-
 
 pytestmark = [
     pytest.mark.skipif(not imports_successful(), reason='logfire not installed'),
@@ -103,10 +101,9 @@ class MyResponseStream(StreamedResponse):
         return datetime(2022, 1, 1)
 
 
-@pytest.mark.anyio
 @requires_logfire_events
 async def test_instrumented_model(capfire: CaptureLogfire):
-    model = InstrumentedModel(MyModel(), event_mode='logs')
+    model = InstrumentedModel(MyModel(), InstrumentationSettings(event_mode='logs'))
     assert model.system == 'my_system'
     assert model.model_name == 'my_model'
 
@@ -193,7 +190,7 @@ async def test_instrumented_model(capfire: CaptureLogfire):
                 'trace_flags': 1,
             },
             {
-                'body': {'content': 'tool_return_content', 'role': 'tool', 'id': 'tool_call_3'},
+                'body': {'content': 'tool_return_content', 'role': 'tool', 'id': 'tool_call_3', 'name': 'tool3'},
                 'severity_number': 9,
                 'severity_text': None,
                 'attributes': {
@@ -216,6 +213,7 @@ Fix the errors and try again.\
 """,
                     'role': 'tool',
                     'id': 'tool_call_4',
+                    'name': 'tool4',
                 },
                 'severity_number': 9,
                 'severity_text': None,
@@ -311,9 +309,11 @@ Fix the errors and try again.\
     )
 
 
-@pytest.mark.anyio
 async def test_instrumented_model_not_recording():
-    model = InstrumentedModel(
+    model = InstrumentedModel(
+        MyModel(),
+        InstrumentationSettings(tracer_provider=NoOpTracerProvider(), event_logger_provider=NoOpEventLoggerProvider()),
+    )
 
     messages: list[ModelMessage] = [ModelRequest(parts=[SystemPromptPart('system_prompt')])]
     await model.request(
@@ -327,10 +327,9 @@ async def test_instrumented_model_not_recording():
     )
 
 
-@pytest.mark.anyio
 @requires_logfire_events
 async def test_instrumented_model_stream(capfire: CaptureLogfire):
-    model = InstrumentedModel(MyModel(), event_mode='logs')
+    model = InstrumentedModel(MyModel(), InstrumentationSettings(event_mode='logs'))
 
     messages: list[ModelMessage] = [
         ModelRequest(
@@ -410,10 +409,9 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire):
     )
 
 
-@pytest.mark.anyio
 @requires_logfire_events
 async def test_instrumented_model_stream_break(capfire: CaptureLogfire):
-    model = InstrumentedModel(MyModel(), event_mode='logs')
+    model = InstrumentedModel(MyModel(), InstrumentationSettings(event_mode='logs'))
 
     messages: list[ModelMessage] = [
         ModelRequest(
@@ -505,9 +503,8 @@ async def test_instrumented_model_stream_break(capfire: CaptureLogfire):
     )
 
 
-@pytest.mark.anyio
 async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire):
-    model = InstrumentedModel(MyModel(), event_mode='attributes')
+    model = InstrumentedModel(MyModel(), InstrumentationSettings(event_mode='attributes'))
     assert model.system == 'my_system'
     assert model.model_name == 'my_model'
 
@@ -577,6 +574,7 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire):
                 'event.name': 'gen_ai.tool.message',
                 'content': 'tool_return_content',
                 'role': 'tool',
+                'name': 'tool3',
                 'id': 'tool_call_3',
                 'gen_ai.message.index': 0,
                 'gen_ai.system': 'my_system',
@@ -589,6 +587,7 @@ retry_prompt1
 Fix the errors and try again.\
 """,
                 'role': 'tool',
+                'name': 'tool4',
                 'id': 'tool_call_4',
                 'gen_ai.message.index': 0,
                 'gen_ai.system': 'my_system',
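The test changes above reflect the new InstrumentationSettings object, which replaces the bare event_mode and provider keyword arguments previously passed to InstrumentedModel. A minimal sketch of that pattern, assuming pydantic-ai 0.0.34 with the logfire/OpenTelemetry extras installed and using TestModel purely as a stand-in for any model:

from opentelemetry._events import NoOpEventLoggerProvider
from opentelemetry.trace import NoOpTracerProvider

from pydantic_ai.models.instrumented import InstrumentationSettings, InstrumentedModel
from pydantic_ai.models.test import TestModel

# Emit message events as OpenTelemetry logs, as the 'logs' event-mode tests above do.
model = InstrumentedModel(TestModel(), InstrumentationSettings(event_mode='logs'))

# Route everything to no-op providers, mirroring test_instrumented_model_not_recording.
silent_model = InstrumentedModel(
    TestModel(),
    InstrumentationSettings(
        tracer_provider=NoOpTracerProvider(),
        event_logger_provider=NoOpEventLoggerProvider(),
    ),
)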
pydantic_ai-0.0.34/tests/test_cli.py +52 -0

@@ -0,0 +1,52 @@
+import pytest
+from dirty_equals import IsStr
+from inline_snapshot import snapshot
+from pytest import CaptureFixture
+
+from .conftest import try_import
+
+with try_import() as imports_successful:
+    from pydantic_ai._cli import cli
+
+pytestmark = pytest.mark.skipif(not imports_successful(), reason='install cli extras to run cli tests')
+
+
+def test_cli_version(capfd: CaptureFixture[str]):
+    assert cli(['--version']) == 0
+    assert capfd.readouterr().out.startswith('pai - PydanticAI CLI')
+
+
+def test_cli_help(capfd: CaptureFixture[str]):
+    with pytest.raises(SystemExit) as exc:
+        cli(['--help'])
+    assert exc.value.code == 0
+
+    assert capfd.readouterr().out.splitlines() == snapshot(
+        [
+            'usage: pai [-h] [--model [MODEL]] [--no-stream] [--version] [prompt]',
+            '',
+            IsStr(),
+            '',
+            'Special prompt:',
+            '* `/exit` - exit the interactive mode',
+            '* `/markdown` - show the last markdown output of the last question',
+            '* `/multiline` - toggle multiline mode',
+            '',
+            'positional arguments:',
+            ' prompt AI Prompt, if omitted fall into interactive mode',
+            '',
+            IsStr(),
+            ' -h, --help show this help message and exit',
+            ' --model [MODEL] Model to use, it should be "<provider>:<model>" e.g. "openai:gpt-4o". If omitted it will default to "openai:gpt-4o"',
+            ' --no-stream Whether to stream responses from OpenAI',
+            ' --version Show version and exit',
+        ]
+    )
+
+
+def test_invalid_model(capfd: CaptureFixture[str]):
+    assert cli(['--model', 'invalid_model']) == 1
+    assert capfd.readouterr().out == snapshot("""\
+pai - PydanticAI CLI v0.0.34
+Invalid model "invalid_model"
+""")
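tests/test_cli.py is new in 0.0.34 and covers the pai command added behind the new cli extra. As the tests show, the underlying cli() entry point takes an argv-style list and returns an exit code, so it can also be driven programmatically; a small sketch along those lines, with illustrative argument values only:

from pydantic_ai._cli import cli

exit_code = cli(['--version'])                 # prints 'pai - PydanticAI CLI v0.0.34' and returns 0
bad_model = cli(['--model', 'invalid_model'])  # prints an 'Invalid model' message and returns 1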
{pydantic_ai-0.0.33 → pydantic_ai-0.0.34}/tests/test_logfire.py +82 -32

@@ -9,6 +9,7 @@ from inline_snapshot import snapshot
 from typing_extensions import NotRequired, TypedDict
 
 from pydantic_ai import Agent
+from pydantic_ai.models.instrumented import InstrumentationSettings, InstrumentedModel
 from pydantic_ai.models.test import TestModel
 
 try:
@@ -58,8 +59,17 @@ def get_logfire_summary(capfire: CaptureLogfire) -> Callable[[], LogfireSummary]
 
 
 @pytest.mark.skipif(not logfire_installed, reason='logfire not installed')
-def test_logfire(get_logfire_summary: Callable[[], LogfireSummary]) -> None:
-    my_agent = Agent(model=TestModel())
+@pytest.mark.parametrize(
+    'instrument',
+    [
+        True,
+        False,
+        InstrumentationSettings(event_mode='attributes'),
+        InstrumentationSettings(event_mode='logs'),
+    ],
+)
+def test_logfire(get_logfire_summary: Callable[[], LogfireSummary], instrument: InstrumentationSettings | bool) -> None:
+    my_agent = Agent(model=TestModel(), instrument=instrument)
 
     @my_agent.tool_plain
     async def my_ret(x: int) -> str:
@@ -69,6 +79,10 @@ def test_logfire(get_logfire_summary: Callable[[], LogfireSummary]) -> None:
     assert result.data == snapshot('{"my_ret":"1"}')
 
     summary = get_logfire_summary()
+    if instrument is False:
+        assert summary.traces == []
+        return
+
     assert summary.traces == snapshot(
         [
             {
@@ -120,6 +134,7 @@ def test_logfire(get_logfire_summary: Callable[[], LogfireSummary]) -> None:
                         'content': '1',
                         'role': 'tool',
                         'id': None,
+                        'name': 'my_ret',
                         'gen_ai.message.index': 2,
                         'event.name': 'gen_ai.tool.message',
                     },
@@ -150,7 +165,44 @@ def test_logfire(get_logfire_summary: Callable[[], LogfireSummary]) -> None:
             'logfire.msg': 'preparing model request params',
         }
     )
-    assert summary.attributes[2] == snapshot(
+    chat_span_attributes = summary.attributes[2]
+    if instrument is True or instrument.event_mode == 'attributes':
+        attribute_mode_attributes = {k: chat_span_attributes.pop(k) for k in ['events', 'logfire.json_schema']}
+        assert attribute_mode_attributes == snapshot(
+            {
+                'events': IsJson(
+                    snapshot(
+                        [
+                            {
+                                'event.name': 'gen_ai.user.message',
+                                'content': 'Hello',
+                                'role': 'user',
+                                'gen_ai.message.index': 0,
+                                'gen_ai.system': 'test',
+                            },
+                            {
+                                'event.name': 'gen_ai.choice',
+                                'index': 0,
+                                'message': {
+                                    'role': 'assistant',
+                                    'tool_calls': [
+                                        {
+                                            'id': None,
+                                            'type': 'function',
+                                            'function': {'name': 'my_ret', 'arguments': {'x': 0}},
+                                        }
+                                    ],
+                                },
+                                'gen_ai.system': 'test',
+                            },
+                        ]
+                    )
+                ),
+                'logfire.json_schema': '{"type": "object", "properties": {"events": {"type": "array"}}}',
+            }
+        )
+
+    assert chat_span_attributes == snapshot(
         {
             'gen_ai.operation.name': 'chat',
             'gen_ai.system': 'test',
@@ -160,34 +212,32 @@ def test_logfire(get_logfire_summary: Callable[[], LogfireSummary]) -> None:
             'gen_ai.response.model': 'test',
             'gen_ai.usage.input_tokens': 51,
             'gen_ai.usage.output_tokens': 4,
-            'events': IsJson(
-                snapshot(
-                    [
-                        {
-                            'event.name': 'gen_ai.user.message',
-                            'content': 'Hello',
-                            'role': 'user',
-                            'gen_ai.message.index': 0,
-                            'gen_ai.system': 'test',
-                        },
-                        {
-                            'event.name': 'gen_ai.choice',
-                            'index': 0,
-                            'message': {
-                                'role': 'assistant',
-                                'tool_calls': [
-                                    {
-                                        'id': None,
-                                        'type': 'function',
-                                        'function': {'name': 'my_ret', 'arguments': {'x': 0}},
-                                    }
-                                ],
-                            },
-                            'gen_ai.system': 'test',
-                        },
-                    ]
-                )
-            ),
-            'logfire.json_schema': '{"type": "object", "properties": {"events": {"type": "array"}}}',
         }
     )
+
+
+def test_instrument_all():
+    model = TestModel()
+    agent = Agent()
+
+    def get_model():
+        return agent._get_model(model)  # type: ignore
+
+    Agent.instrument_all(False)
+    assert get_model() is model
+
+    Agent.instrument_all()
+    m = get_model()
+    assert isinstance(m, InstrumentedModel)
+    assert m.wrapped is model
+    assert m.options.event_mode == InstrumentationSettings().event_mode
+
+    options = InstrumentationSettings(event_mode='logs')
+    Agent.instrument_all(options)
+    m = get_model()
+    assert isinstance(m, InstrumentedModel)
+    assert m.wrapped is model
+    assert m.options is options
+
+    Agent.instrument_all(False)
+    assert get_model() is model
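The test_logfire.py changes exercise the new per-agent instrument argument and the Agent.instrument_all() class method. A short sketch of those knobs, assuming pydantic-ai 0.0.34 with the logfire extra installed and again using TestModel as a placeholder model:

from pydantic_ai import Agent
from pydantic_ai.models.instrumented import InstrumentationSettings
from pydantic_ai.models.test import TestModel

# Per-agent: instrument accepts True/False or an InstrumentationSettings instance.
agent = Agent(model=TestModel(), instrument=InstrumentationSettings(event_mode='attributes'))

# Process-wide default for agents that don't pass instrument explicitly.
Agent.instrument_all(InstrumentationSettings(event_mode='logs'))
Agent.instrument_all(False)  # switch the global default back off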