pydantic-ai 0.0.23__tar.gz → 0.0.25__tar.gz
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/Makefile +5 -1
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/PKG-INFO +5 -9
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/pyproject.toml +8 -12
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/conftest.py +21 -3
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/test_graph.py +10 -10
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/test_history.py +6 -5
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/test_mermaid.py +3 -3
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/test_state.py +3 -3
- pydantic_ai-0.0.25/tests/json_body_serializer.py +77 -0
- pydantic_ai-0.0.25/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +212 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_anthropic.py +99 -8
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_gemini.py +39 -9
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_groq.py +7 -7
- pydantic_ai-0.0.25/tests/models/test_instrumented.py +496 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_mistral.py +17 -17
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_openai.py +15 -11
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_agent.py +126 -128
- pydantic_ai-0.0.25/tests/test_json_body_serializer.py +177 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_streaming.py +9 -13
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_usage_limits.py +31 -27
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/typed_agent.py +3 -3
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/typed_graph.py +3 -3
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/.gitignore +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/LICENSE +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/README.md +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/__init__.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/example_modules/README.md +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/example_modules/bank_database.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/example_modules/fake_database.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/example_modules/weather_service.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/__init__.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/import_examples.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/__init__.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/mock_async_stream.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_cohere.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_model.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_model_function.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_model_names.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_model_test.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/models/test_vertexai.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_deps.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_examples.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_format_as_xml.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_live.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_logfire.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_parts_manager.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_tools.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/test_utils.py +0 -0
{pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/Makefile

@@ -64,7 +64,11 @@ testcov: test ## Run tests and generate a coverage report
 
 .PHONY: update-examples
 update-examples: ## Update documentation examples
-	uv run -m pytest --update-examples
+	uv run -m pytest --update-examples tests/test_examples.py
+
+.PHONY: update-vcr-tests
+update-vcr-tests: ## Update tests using VCR that hit LLM APIs; note you'll need to set API keys as appropriate
+	uv run -m pytest --record-mode=rewrite tests
 
 # `--no-strict` so you can build the docs without insiders packages
 .PHONY: docs
{pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.0.23
+Version: 0.0.25
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -10,17 +10,12 @@ Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
 License-File: LICENSE
 Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Console
-Classifier: Environment :: MacOS X
 Classifier: Framework :: Pydantic
 Classifier: Framework :: Pydantic :: 2
-Classifier: Framework :: Pytest
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Information Technology
-Classifier: Intended Audience :: System Administrators
 Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: POSIX :: Linux
-Classifier: Operating System :: Unix
+Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
@@ -30,11 +25,12 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Internet
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.23
+Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.25
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.0.23; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.0.25; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=2.3; extra == 'logfire'
 Description-Content-Type: text/markdown
{pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai"
-version = "0.0.23"
+version = "0.0.25"
 description = "Agent Framework / shim to use Pydantic with LLMs"
 authors = [
     { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
@@ -23,21 +23,17 @@ classifiers = [
     "Programming Language :: Python :: 3.13",
     "Intended Audience :: Developers",
     "Intended Audience :: Information Technology",
-    "Intended Audience :: System Administrators",
     "License :: OSI Approved :: MIT License",
-    "Operating System :: Unix",
-    "Operating System :: POSIX :: Linux",
-    "Environment :: Console",
-    "Environment :: MacOS X",
-    "Topic :: Software Development :: Libraries :: Python Modules",
+    "Operating System :: OS Independent",
     "Topic :: Internet",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    "Topic :: Software Development :: Libraries :: Python Modules",
     "Framework :: Pydantic",
     "Framework :: Pydantic :: 2",
-    "Framework :: Pytest",
 ]
 requires-python = ">=3.9"
 
-dependencies = ["pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.23"]
+dependencies = ["pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.25"]
 
 [project.urls]
 Homepage = "https://ai.pydantic.dev"
@@ -46,7 +42,7 @@ Documentation = "https://ai.pydantic.dev"
 Changelog = "https://github.com/pydantic/pydantic-ai/releases"
 
 [project.optional-dependencies]
-examples = ["pydantic-ai-examples==0.0.23"]
+examples = ["pydantic-ai-examples==0.0.25"]
 logfire = ["logfire>=2.3"]
 
 [tool.uv.sources]
@@ -61,7 +57,7 @@ members = ["pydantic_ai_slim", "pydantic_graph", "examples"]
 # dev dependencies are defined in `pydantic-ai-slim/pyproject.toml` to allow for minimal testing
 lint = [
     "mypy>=1.11.2",
-    "pyright>=1.1.388",
+    "pyright>=1.1.388,<1.1.390",
     "ruff>=0.6.9",
 ]
 docs = [
@@ -193,4 +189,4 @@ skip = '.git*,*.svg,*.lock,*.css'
 check-hidden = true
 # Ignore "formatting" like **L**anguage
 ignore-regex = '\*\*[A-Z]\*\*[a-z]+\b'
-
+ignore-words-list = 'asend'
{pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/conftest.py

@@ -17,20 +17,23 @@ import httpx
 import pytest
 from _pytest.assertion.rewrite import AssertionRewritingHook
 from typing_extensions import TypeAlias
+from vcr import VCR
 
 import pydantic_ai.models
 
-__all__ = '
+__all__ = 'IsDatetime', 'IsFloat', 'IsNow', 'IsStr', 'TestEnv', 'ClientWithHandler', 'try_import'
 
 
 pydantic_ai.models.ALLOW_MODEL_REQUESTS = False
 
 if TYPE_CHECKING:
 
-    def
+    def IsDatetime(*args: Any, **kwargs: Any) -> datetime: ...
     def IsFloat(*args: Any, **kwargs: Any) -> float: ...
+    def IsNow(*args: Any, **kwargs: Any) -> datetime: ...
+    def IsStr(*args: Any, **kwargs: Any) -> str: ...
 else:
-    from dirty_equals import IsFloat, IsNow as _IsNow
+    from dirty_equals import IsDatetime, IsFloat, IsNow as _IsNow, IsStr
 
     def IsNow(*args: Any, **kwargs: Any):
         # Increase the default value of `delta` to 10 to reduce test flakiness on overburdened machines
@@ -179,3 +182,18 @@ def set_event_loop() -> Iterator[None]:
     asyncio.set_event_loop(new_loop)
     yield
     new_loop.close()
+
+
+def pytest_recording_configure(config: Any, vcr: VCR):
+    from . import json_body_serializer
+
+    vcr.register_serializer('yaml', json_body_serializer)
+
+
+@pytest.fixture(scope='module')
+def vcr_config():
+    return {
+        # Note: additional header filtering is done inside the serializer
+        'filter_headers': ['authorization', 'x-api-key'],
+        'decode_compressed_response': True,
+    }
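For context, a minimal sketch (not part of the diff) of how pytest-recording consumes this configuration: the `vcr_config` fixture supplies keyword arguments for VCR, `pytest_recording_configure` swaps in the JSON-aware serializer, and individual tests opt in with the `vcr` marker. Cassettes are then recorded or rewritten with the new `make update-vcr-tests` target. The test name and module below are assumptions:

    import pytest

    @pytest.mark.vcr()  # hypothetical test using the configuration above
    async def test_calls_an_llm_api():
        # HTTP traffic is replayed from (or recorded to)
        # tests/models/cassettes/<module name>/test_calls_an_llm_api.yaml,
        # written via the json_body_serializer registered above.
        ...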
{pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/test_graph.py

@@ -57,11 +57,11 @@ async def test_graph():
     assert my_graph.name is None
     assert my_graph._get_state_type() is type(None)
     assert my_graph._get_run_end_type() is int
-    result
+    result = await my_graph.run(Float2String(3.14))
     # len('3.14') * 2 == 8
-    assert result == 8
+    assert result.output == 8
     assert my_graph.name == 'my_graph'
-    assert history == snapshot(
+    assert result.history == snapshot(
         [
             NodeStep(
                 state=None,
@@ -84,10 +84,10 @@ async def test_graph():
             EndStep(result=End(data=8), ts=IsNow(tz=timezone.utc)),
         ]
     )
-    result
+    result = await my_graph.run(Float2String(3.14159))
     # len('3.14159') == 7, 21 * 2 == 42
-    assert result == 42
-    assert history == snapshot(
+    assert result.output == 42
+    assert result.history == snapshot(
         [
             NodeStep(
                 state=None,
@@ -122,7 +122,7 @@ async def test_graph():
             EndStep(result=End(data=42), ts=IsNow(tz=timezone.utc)),
         ]
     )
-    assert [e.data_snapshot() for e in history] == snapshot(
+    assert [e.data_snapshot() for e in result.history] == snapshot(
         [
             Float2String(input_data=3.14159),
             String2Length(input_data='3.14159'),
@@ -320,10 +320,10 @@ async def test_deps():
             return End(123)
 
     g = Graph(nodes=(Foo, Bar))
-    result
+    result = await g.run(Foo(), deps=Deps(1, 2))
 
-    assert result == 123
-    assert history == snapshot(
+    assert result.output == 123
+    assert result.history == snapshot(
         [
             NodeStep(state=None, node=Foo(), start_ts=IsNow(tz=timezone.utc), duration=IsFloat()),
             NodeStep(state=None, node=Bar(), start_ts=IsNow(tz=timezone.utc), duration=IsFloat()),
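These test updates track an API change in pydantic-graph: judging by the assertions, `Graph.run` no longer yields the end value alongside a separate history but returns a single run-result object with `output` and `history` attributes. A minimal sketch of the new shape, using names from the tests above:

    result = await my_graph.run(Float2String(3.14))
    assert result.output == 8      # the value wrapped in End(...), previously asserted directly
    for step in result.history:    # NodeStep entries followed by a final EndStep
        print(step.data_snapshot())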
{pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/test_history.py

@@ -46,16 +46,17 @@ class Bar(BaseNode[MyState, None, int]):
     ],
 )
 async def test_dump_load_history(graph: Graph[MyState, None, int]):
-    result
-    assert result == snapshot(4)
-    assert
+    result = await graph.run(Foo(), state=MyState(1, ''))
+    assert result.output == snapshot(4)
+    assert result.state == snapshot(MyState(x=2, y='y'))
+    assert result.history == snapshot(
         [
             NodeStep(state=MyState(x=2, y=''), node=Foo(), start_ts=IsNow(tz=timezone.utc), duration=IsFloat()),
             NodeStep(state=MyState(x=2, y='y'), node=Bar(), start_ts=IsNow(tz=timezone.utc), duration=IsFloat()),
             EndStep(result=End(4), ts=IsNow(tz=timezone.utc)),
         ]
     )
-    history_json = graph.dump_history(history)
+    history_json = graph.dump_history(result.history)
     assert json.loads(history_json) == snapshot(
         [
             {
@@ -76,7 +77,7 @@ async def test_dump_load_history(graph: Graph[MyState, None, int]):
         ]
     )
     history_loaded = graph.load_history(history_json)
-    assert history == history_loaded
+    assert result.history == history_loaded
 
     custom_history = [
         {
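The round trip exercised above, in isolation: `dump_history` serializes the run history to JSON, and `load_history` restores step objects that compare equal. A sketch using the names from the test:

    result = await graph.run(Foo(), state=MyState(1, ''))
    history_json = graph.dump_history(result.history)  # JSON array of NodeStep/EndStep dicts
    assert graph.load_history(history_json) == result.history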
{pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/test_mermaid.py

@@ -58,9 +58,9 @@ graph2 = Graph(nodes=(Spam, Foo, Bar, Eggs))
 
 
 async def test_run_graph():
-    result
-    assert result is None
-    assert history == snapshot(
+    result = await graph1.run(Foo())
+    assert result.output is None
+    assert result.history == snapshot(
         [
             NodeStep(
                 state=None,
{pydantic_ai-0.0.23 → pydantic_ai-0.0.25}/tests/graph/test_state.py

@@ -36,9 +36,9 @@ async def test_run_graph():
     assert graph._get_state_type() is MyState
     assert graph._get_run_end_type() is str
     state = MyState(1, '')
-    result
-    assert result == snapshot('x=2 y=y')
-    assert history == snapshot(
+    result = await graph.run(Foo(), state=state)
+    assert result.output == snapshot('x=2 y=y')
+    assert result.history == snapshot(
         [
             NodeStep(
                 state=MyState(x=2, y=''),
pydantic_ai-0.0.25/tests/json_body_serializer.py
ADDED

@@ -0,0 +1,77 @@
+# pyright: reportUnknownMemberType=false, reportUnknownVariableType=false
+import json
+from typing import TYPE_CHECKING, Any
+
+import yaml
+
+if TYPE_CHECKING:
+    from yaml import Dumper, Loader
+else:
+    try:
+        from yaml import CDumper as Dumper, CLoader as Loader
+    except ImportError:
+        from yaml import Dumper, Loader
+
+FILTERED_HEADER_PREFIXES = ['anthropic-', 'cf-', 'x-']
+FILTERED_HEADERS = {'authorization', 'date', 'request-id', 'server', 'user-agent', 'via'}
+
+
+class LiteralDumper(Dumper):
+    """
+    A custom dumper that will represent multi-line strings using literal style.
+    """
+
+
+def str_presenter(dumper: Dumper, data: str):
+    """If the string contains newlines, represent it as a literal block."""
+    if '\n' in data:
+        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
+    return dumper.represent_scalar('tag:yaml.org,2002:str', data)
+
+
+# Register the custom presenter on our dumper
+LiteralDumper.add_representer(str, str_presenter)
+
+
+def deserialize(cassette_string: str):
+    cassette_dict = yaml.load(cassette_string, Loader=Loader)
+    for interaction in cassette_dict['interactions']:
+        for kind, data in interaction.items():
+            parsed_body = data.pop('parsed_body', None)
+            if parsed_body is not None:
+                dumped_body = json.dumps(parsed_body)
+                data['body'] = {'string': dumped_body} if kind == 'response' else dumped_body
+    return cassette_dict
+
+
+def serialize(cassette_dict: Any):
+    for interaction in cassette_dict['interactions']:
+        for _kind, data in interaction.items():
+            headers: dict[str, list[str]] = data.get('headers', {})
+            # make headers lowercase
+            headers = {k.lower(): v for k, v in headers.items()}
+            # filter headers by name
+            headers = {k: v for k, v in headers.items() if k not in FILTERED_HEADERS}
+            # filter headers by prefix
+            headers = {
+                k: v for k, v in headers.items() if not any(k.startswith(prefix) for prefix in FILTERED_HEADER_PREFIXES)
+            }
+            # update headers on source object
+            data['headers'] = headers
+
+            content_type = headers.get('content-type', None)
+            if content_type != ['application/json']:
+                continue
+
+            # Parse the body as JSON
+            body: Any = data.get('body', None)
+            assert body is not None, data
+            if isinstance(body, dict):
+                # Responses will have the body under a field called 'string'
+                body = body.get('string')
+            if body is not None:
+                data['parsed_body'] = json.loads(body)
+                del data['body']
+
+    # Use our custom dumper
+    return yaml.dump(cassette_dict, Dumper=LiteralDumper, allow_unicode=True, width=120)
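To illustrate what this serializer buys, a minimal sketch (the cassette dict below is made up): JSON bodies are stored under a structured `parsed_body` key so cassettes stay human-readable and diff cleanly, noisy headers are dropped, and `deserialize` restores a plain `body` for VCR playback.

    from tests import json_body_serializer

    cassette = {
        'interactions': [
            {
                'request': {
                    'headers': {'Content-Type': ['application/json'], 'X-Api-Key': ['secret']},
                    'body': '{"model": "claude-3-5-haiku-latest"}',
                }
            }
        ]
    }
    text = json_body_serializer.serialize(cassette)
    assert 'parsed_body:' in text and 'x-api-key' not in text  # body parsed, header filtered
    restored = json_body_serializer.deserialize(text)  # parsed_body becomes a body string again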
pydantic_ai-0.0.25/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml
ADDED
@@ -0,0 +1,212 @@
+interactions:
+- request:
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '793'
+      content-type:
+      - application/json
+      host:
+      - api.anthropic.com
+    method: POST
+    parsed_body:
+      max_tokens: 1024
+      messages:
+      - content:
+        - text: Alice, Bob, Charlie and Daisy are a family. Who is the youngest?
+          type: text
+        role: user
+      model: claude-3-5-haiku-latest
+      stream: false
+      system: "\n Use the `retrieve_entity_info` tool to get information about a specific person.\n If you need to use
+        `retrieve_entity_info` to get information about multiple people, try\n to call them in parallel as much as possible.\n
+        \ Think step by step and then provide a single most probable concise answer.\n "
+      tool_choice:
+        type: auto
+      tools:
+      - description: Get the knowledge about the given entity.
+        input_schema:
+          additionalProperties: false
+          properties:
+            name:
+              title: Name
+              type: string
+          required:
+          - name
+          type: object
+        name: retrieve_entity_info
+    uri: https://api.anthropic.com/v1/messages
+  response:
+    headers:
+      connection:
+      - keep-alive
+      content-length:
+      - '835'
+      content-type:
+      - application/json
+      transfer-encoding:
+      - chunked
+    parsed_body:
+      content:
+      - text: Let me retrieve the information about each family member to determine their ages.
+        type: text
+      - id: toolu_01B6ALG3PUWnoTbSG77WfHsW
+        input:
+          name: Alice
+        name: retrieve_entity_info
+        type: tool_use
+      - id: toolu_018ar6FXUarSmz9v81ajKN5q
+        input:
+          name: Bob
+        name: retrieve_entity_info
+        type: tool_use
+      - id: toolu_01DQDAMdPsj6Seitxpc29HZF
+        input:
+          name: Charlie
+        name: retrieve_entity_info
+        type: tool_use
+      - id: toolu_01Vzma2bAahRtnZi69djzStJ
+        input:
+          name: Daisy
+        name: retrieve_entity_info
+        type: tool_use
+      id: msg_01B6EyfNCSVqtFxbXvfmQXaX
+      model: claude-3-5-haiku-20241022
+      role: assistant
+      stop_reason: tool_use
+      stop_sequence: null
+      type: message
+      usage:
+        cache_creation_input_tokens: 0
+        cache_read_input_tokens: 0
+        input_tokens: 429
+        output_tokens: 186
+    status:
+      code: 200
+      message: OK
+- request:
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '1928'
+      content-type:
+      - application/json
+      host:
+      - api.anthropic.com
+    method: POST
+    parsed_body:
+      max_tokens: 1024
+      messages:
+      - content:
+        - text: Alice, Bob, Charlie and Daisy are a family. Who is the youngest?
+          type: text
+        role: user
+      - content:
+        - text: Let me retrieve the information about each family member to determine their ages.
+          type: text
+        - id: toolu_01B6ALG3PUWnoTbSG77WfHsW
+          input:
+            name: Alice
+          name: retrieve_entity_info
+          type: tool_use
+        - id: toolu_018ar6FXUarSmz9v81ajKN5q
+          input:
+            name: Bob
+          name: retrieve_entity_info
+          type: tool_use
+        - id: toolu_01DQDAMdPsj6Seitxpc29HZF
+          input:
+            name: Charlie
+          name: retrieve_entity_info
+          type: tool_use
+        - id: toolu_01Vzma2bAahRtnZi69djzStJ
+          input:
+            name: Daisy
+          name: retrieve_entity_info
+          type: tool_use
+        role: assistant
+      - content:
+        - content: alice is bob's wife
+          is_error: false
+          tool_use_id: toolu_01B6ALG3PUWnoTbSG77WfHsW
+          type: tool_result
+        - content: bob is alice's husband
+          is_error: false
+          tool_use_id: toolu_018ar6FXUarSmz9v81ajKN5q
+          type: tool_result
+        - content: charlie is alice's son
+          is_error: false
+          tool_use_id: toolu_01DQDAMdPsj6Seitxpc29HZF
+          type: tool_result
+        - content: daisy is bob's daughter and charlie's younger sister
+          is_error: false
+          tool_use_id: toolu_01Vzma2bAahRtnZi69djzStJ
+          type: tool_result
+        role: user
+      model: claude-3-5-haiku-latest
+      stream: false
+      system: "\n Use the `retrieve_entity_info` tool to get information about a specific person.\n If you need to use
+        `retrieve_entity_info` to get information about multiple people, try\n to call them in parallel as much as possible.\n
+        \ Think step by step and then provide a single most probable concise answer.\n "
+      tool_choice:
+        type: auto
+      tools:
+      - description: Get the knowledge about the given entity.
+        input_schema:
+          additionalProperties: false
+          properties:
+            name:
+              title: Name
+              type: string
+          required:
+          - name
+          type: object
+        name: retrieve_entity_info
+    uri: https://api.anthropic.com/v1/messages
+  response:
+    headers:
+      connection:
+      - keep-alive
+      content-length:
+      - '618'
+      content-type:
+      - application/json
+      transfer-encoding:
+      - chunked
+    parsed_body:
+      content:
+      - text: |-
+          Based on the retrieved information, we can see the family relationships:
+          - Alice and Bob are married
+          - Charlie is their son
+          - Daisy is their daughter and Charlie's younger sister
+
+          Since Daisy is described as Charlie's younger sister, Daisy is the youngest in this family.
+
+          The answer is: Daisy is the youngest.
+        type: text
+      id: msg_018ApbFsve4yLKjivLPAzqvA
+      model: claude-3-5-haiku-20241022
+      role: assistant
+      stop_reason: end_turn
+      stop_sequence: null
+      type: message
+      usage:
+        cache_creation_input_tokens: 0
+        cache_read_input_tokens: 0
+        input_tokens: 761
+        output_tokens: 78
+    status:
+      code: 200
+      message: OK
+version: 1
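The cassette captures two exchanges: the model fans out four `retrieve_entity_info` calls in parallel, then answers from the tool results. A hypothetical sketch of the kind of agent that produces this traffic (the real test is `test_multiple_parallel_tool_calls` in tests/models/test_anthropic.py; apart from the tool name, prompts, and tool outputs, which come from the cassette, every name below is an assumption):

    from pydantic_ai import Agent

    agent = Agent(
        'anthropic:claude-3-5-haiku-latest',
        system_prompt=(
            'Use the `retrieve_entity_info` tool to get information about a specific person. '
            'If you need to use `retrieve_entity_info` to get information about multiple people, try '
            'to call them in parallel as much as possible. '
            'Think step by step and then provide a single most probable concise answer.'
        ),
    )

    KNOWLEDGE = {
        'Alice': "alice is bob's wife",
        'Bob': "bob is alice's husband",
        'Charlie': "charlie is alice's son",
        'Daisy': "daisy is bob's daughter and charlie's younger sister",
    }

    @agent.tool_plain  # the docstring becomes the tool description sent to the API
    async def retrieve_entity_info(name: str) -> str:
        """Get the knowledge about the given entity."""
        return KNOWLEDGE[name]

    result = await agent.run('Alice, Bob, Charlie and Daisy are a family. Who is the youngest?')
    # expected, per the recorded response: '... Daisy is the youngest.'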