pydantic-ai 0.0.24__tar.gz → 0.0.26__tar.gz
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of pydantic-ai has been flagged as potentially problematic.
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/Makefile +5 -1
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/PKG-INFO +5 -9
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/pyproject.toml +8 -12
- pydantic_ai-0.0.26/tests/assets/kiwi.png +0 -0
- pydantic_ai-0.0.26/tests/assets/marcelo.mp3 +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/conftest.py +66 -3
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/test_graph.py +10 -10
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/test_history.py +7 -6
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/test_mermaid.py +3 -3
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/test_state.py +4 -4
- pydantic_ai-0.0.26/tests/json_body_serializer.py +77 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +662 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +212 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +85 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_gemini/test_image_url_input.yaml +6903 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +74 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_groq/test_image_url_input.yaml +84 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +85 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +82 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +59 -0
- pydantic_ai-0.0.26/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +61 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_anthropic.py +125 -1
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_cohere.py +23 -3
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_gemini.py +63 -10
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_groq.py +55 -0
- pydantic_ai-0.0.26/tests/models/test_instrumented.py +496 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_mistral.py +84 -2
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_model_function.py +16 -14
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_model_test.py +2 -2
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_openai.py +79 -5
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_agent.py +130 -132
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_examples.py +2 -0
- pydantic_ai-0.0.26/tests/test_json_body_serializer.py +177 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_streaming.py +9 -13
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_usage_limits.py +31 -27
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/typed_agent.py +3 -3
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/typed_graph.py +3 -3
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/.gitignore +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/LICENSE +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/README.md +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/__init__.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/example_modules/README.md +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/example_modules/bank_database.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/example_modules/fake_database.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/example_modules/weather_service.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/__init__.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/import_examples.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/__init__.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/mock_async_stream.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_model.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_model_names.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/models/test_vertexai.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_deps.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_format_as_xml.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_live.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_logfire.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_parts_manager.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_tools.py +0 -0
- {pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/test_utils.py +0 -0
{pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/Makefile

@@ -64,7 +64,11 @@ testcov: test ## Run tests and generate a coverage report
 
 .PHONY: update-examples
 update-examples: ## Update documentation examples
-	uv run -m pytest --update-examples
+	uv run -m pytest --update-examples tests/test_examples.py
+
+.PHONY: update-vcr-tests
+update-vcr-tests: ## Update tests using VCR that hit LLM APIs; note you'll need to set API keys as appropriate
+	uv run -m pytest --record-mode=rewrite tests
 
 # `--no-strict` so you can build the docs without insiders packages
 .PHONY: docs
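For context on the new update-vcr-tests target: pytest-recording drives vcrpy via the @pytest.mark.vcr marker, replaying stored cassettes by default and re-recording them against the live APIs when --record-mode=rewrite is passed. A minimal sketch of the kind of test this workflow supports; the test body, model name, and anyio marker are illustrative assumptions, not taken from this diff:

import pytest

from pydantic_ai import Agent

pytestmark = pytest.mark.anyio  # assumed async test runner; pydantic-ai's suite uses anyio


@pytest.mark.vcr()  # pytest-recording: replay the stored cassette; re-record with --record-mode=rewrite
async def test_capital_question():  # hypothetical test, not part of this release
    agent = Agent('openai:gpt-4o')  # model name is illustrative
    result = await agent.run('What is the capital of France?')
    assert 'Paris' in result.data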
{pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.0.24
+Version: 0.0.26
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -10,17 +10,12 @@ Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
 License-File: LICENSE
 Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Console
-Classifier: Environment :: MacOS X
 Classifier: Framework :: Pydantic
 Classifier: Framework :: Pydantic :: 2
-Classifier: Framework :: Pytest
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Information Technology
-Classifier: Intended Audience :: System Administrators
 Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: POSIX :: Linux
-Classifier: Operating System :: Unix
+Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
@@ -30,11 +25,12 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Internet
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.24
+Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.26
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.0.24; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.0.26; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=2.3; extra == 'logfire'
 Description-Content-Type: text/markdown
{pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai"
-version = "0.0.24"
+version = "0.0.26"
 description = "Agent Framework / shim to use Pydantic with LLMs"
 authors = [
     { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
@@ -23,21 +23,17 @@ classifiers = [
     "Programming Language :: Python :: 3.13",
     "Intended Audience :: Developers",
     "Intended Audience :: Information Technology",
-    "Intended Audience :: System Administrators",
     "License :: OSI Approved :: MIT License",
-    "Operating System :: Unix",
-    "Operating System :: POSIX :: Linux",
-    "Environment :: Console",
-    "Environment :: MacOS X",
-    "Topic :: Software Development :: Libraries :: Python Modules",
+    "Operating System :: OS Independent",
     "Topic :: Internet",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    "Topic :: Software Development :: Libraries :: Python Modules",
     "Framework :: Pydantic",
     "Framework :: Pydantic :: 2",
-    "Framework :: Pytest",
 ]
 requires-python = ">=3.9"
 
-dependencies = ["pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.24"]
+dependencies = ["pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.26"]
 
 [project.urls]
 Homepage = "https://ai.pydantic.dev"
@@ -46,7 +42,7 @@ Documentation = "https://ai.pydantic.dev"
 Changelog = "https://github.com/pydantic/pydantic-ai/releases"
 
 [project.optional-dependencies]
-examples = ["pydantic-ai-examples==0.0.24"]
+examples = ["pydantic-ai-examples==0.0.26"]
 logfire = ["logfire>=2.3"]
 
 [tool.uv.sources]
@@ -61,7 +57,7 @@ members = ["pydantic_ai_slim", "pydantic_graph", "examples"]
 # dev dependencies are defined in `pydantic-ai-slim/pyproject.toml` to allow for minimal testing
 lint = [
     "mypy>=1.11.2",
-    "pyright>=1.1.388",
+    "pyright>=1.1.388,<1.1.390",
     "ruff>=0.6.9",
 ]
 docs = [
@@ -193,4 +189,4 @@ skip = '.git*,*.svg,*.lock,*.css'
 check-hidden = true
 # Ignore "formatting" like **L**anguage
 ignore-regex = '\*\*[A-Z]\*\*[a-z]+\b'
-
+ignore-words-list = 'asend'
pydantic_ai-0.0.26/tests/assets/kiwi.png
Binary file

pydantic_ai-0.0.26/tests/assets/marcelo.mp3
Binary file
{pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/conftest.py

@@ -17,20 +17,25 @@ import httpx
 import pytest
 from _pytest.assertion.rewrite import AssertionRewritingHook
 from typing_extensions import TypeAlias
+from vcr import VCR
 
 import pydantic_ai.models
+from pydantic_ai.messages import BinaryContent
+from pydantic_ai.models import cached_async_http_client
 
-__all__ = 'IsFloat', 'IsNow', 'TestEnv', 'ClientWithHandler', 'try_import'
+__all__ = 'IsDatetime', 'IsFloat', 'IsNow', 'IsStr', 'TestEnv', 'ClientWithHandler', 'try_import'
 
 
 pydantic_ai.models.ALLOW_MODEL_REQUESTS = False
 
 if TYPE_CHECKING:
 
-    def IsNow(*args: Any, **kwargs: Any) -> datetime: ...
+    def IsDatetime(*args: Any, **kwargs: Any) -> datetime: ...
     def IsFloat(*args: Any, **kwargs: Any) -> float: ...
+    def IsNow(*args: Any, **kwargs: Any) -> datetime: ...
+    def IsStr(*args: Any, **kwargs: Any) -> str: ...
 else:
-    from dirty_equals import IsFloat, IsNow as _IsNow
+    from dirty_equals import IsDatetime, IsFloat, IsNow as _IsNow, IsStr
 
     def IsNow(*args: Any, **kwargs: Any):
         # Increase the default value of `delta` to 10 to reduce test flakiness on overburdened machines
@@ -179,3 +184,61 @@ def set_event_loop() -> Iterator[None]:
     asyncio.set_event_loop(new_loop)
     yield
     new_loop.close()
+
+
+def pytest_recording_configure(config: Any, vcr: VCR):
+    from . import json_body_serializer
+
+    vcr.register_serializer('yaml', json_body_serializer)
+
+
+@pytest.fixture(scope='module')
+def vcr_config():
+    return {
+        # Note: additional header filtering is done inside the serializer
+        'filter_headers': ['authorization', 'x-api-key'],
+        'decode_compressed_response': True,
+    }
+
+
+@pytest.fixture(autouse=True)
+async def close_cached_httpx_client() -> AsyncIterator[None]:
+    yield
+    await cached_async_http_client().aclose()
+
+
+@pytest.fixture(scope='session')
+def assets_path() -> Path:
+    return Path(__file__).parent / 'assets'
+
+
+@pytest.fixture(scope='session')
+def audio_content(assets_path: Path) -> BinaryContent:
+    audio_bytes = assets_path.joinpath('marcelo.mp3').read_bytes()
+    return BinaryContent(data=audio_bytes, media_type='audio/mpeg')
+
+
+@pytest.fixture(scope='session')
+def image_content(assets_path: Path) -> BinaryContent:
+    image_bytes = assets_path.joinpath('kiwi.png').read_bytes()
+    return BinaryContent(data=image_bytes, media_type='image/png')
+
+
+@pytest.fixture(scope='session')
+def openai_api_key() -> str:
+    return os.getenv('OPENAI_API_KEY', 'mock-api-key')
+
+
+@pytest.fixture(scope='session')
+def gemini_api_key() -> str:
+    return os.getenv('GEMINI_API_KEY', 'mock-api-key')
+
+
+@pytest.fixture(scope='session')
+def groq_api_key() -> str:
+    return os.getenv('GROQ_API_KEY', 'mock-api-key')
+
+
+@pytest.fixture(scope='session')
+def anthropic_api_key() -> str:
+    return os.getenv('ANTHROPIC_API_KEY', 'mock-api-key')
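A hypothetical consumer of the new session-scoped fixtures, sketched for illustration: only the fixture name and the BinaryContent type come from this diff; the test body, model name, and assertion are assumptions.

from pydantic_ai import Agent
from pydantic_ai.messages import BinaryContent


async def test_identify_fruit(image_content: BinaryContent):  # sketch only
    agent = Agent('openai:gpt-4o')  # model choice is illustrative
    # Multi-modal input: a text prompt plus the PNG bytes provided by the fixture
    result = await agent.run(['What fruit is in the image?', image_content])
    assert 'kiwi' in result.data.lower()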
{pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/test_graph.py

@@ -57,11 +57,11 @@ async def test_graph():
     assert my_graph.name is None
     assert my_graph._get_state_type() is type(None)
     assert my_graph._get_run_end_type() is int
-    result, history = await my_graph.run(Float2String(3.14))
+    result = await my_graph.run(Float2String(3.14))
     # len('3.14') * 2 == 8
-    assert result == 8
+    assert result.output == 8
     assert my_graph.name == 'my_graph'
-    assert history == snapshot(
+    assert result.history == snapshot(
         [
             NodeStep(
                 state=None,
@@ -84,10 +84,10 @@ async def test_graph():
             EndStep(result=End(data=8), ts=IsNow(tz=timezone.utc)),
         ]
     )
-    result, history = await my_graph.run(Float2String(3.14159))
+    result = await my_graph.run(Float2String(3.14159))
     # len('3.14159') == 7, 21 * 2 == 42
-    assert result == 42
-    assert history == snapshot(
+    assert result.output == 42
+    assert result.history == snapshot(
         [
             NodeStep(
                 state=None,
@@ -122,7 +122,7 @@ async def test_graph():
             EndStep(result=End(data=42), ts=IsNow(tz=timezone.utc)),
         ]
     )
-    assert [e.data_snapshot() for e in history] == snapshot(
+    assert [e.data_snapshot() for e in result.history] == snapshot(
         [
             Float2String(input_data=3.14159),
             String2Length(input_data='3.14159'),
@@ -320,10 +320,10 @@ async def test_deps():
             return End(123)
 
     g = Graph(nodes=(Foo, Bar))
-    result, history = await g.run(Foo(), deps=Deps(1, 2))
+    result = await g.run(Foo(), deps=Deps(1, 2))
 
-    assert result == 123
-    assert history == snapshot(
+    assert result.output == 123
+    assert result.history == snapshot(
         [
             NodeStep(state=None, node=Foo(), start_ts=IsNow(tz=timezone.utc), duration=IsFloat()),
             NodeStep(state=None, node=Bar(), start_ts=IsNow(tz=timezone.utc), duration=IsFloat()),
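The pattern repeated through these graph-test hunks reflects an API change in pydantic-graph: Graph.run no longer returns the end value with the history unpacked alongside it, but a single result object exposing .output, .history, and (as the next file shows) .state. A self-contained sketch of the new shape; the Double node and the prints are invented for illustration, while the .output and .history attributes come straight from the tests above:

import asyncio
from dataclasses import dataclass

from pydantic_graph import BaseNode, End, Graph, GraphRunContext


@dataclass
class Double(BaseNode[None, None, int]):
    value: int

    async def run(self, ctx: GraphRunContext) -> End[int]:
        # A single node that immediately ends the run with its doubled input
        return End(self.value * 2)


graph = Graph(nodes=(Double,))


async def main():
    result = await graph.run(Double(21))
    print(result.output)   # 42; previously the bare first element of the returned pair
    print(result.history)  # [NodeStep(...), EndStep(...)]; previously a separate variable


asyncio.run(main())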
{pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/test_history.py

@@ -46,16 +46,17 @@ class Bar(BaseNode[MyState, None, int]):
     ],
 )
 async def test_dump_load_history(graph: Graph[MyState, None, int]):
-    result, history = await graph.run(Foo(), state=MyState(1, ''))
-    assert result == snapshot(4)
-    assert history == snapshot(
+    result = await graph.run(Foo(), state=MyState(1, ''))
+    assert result.output == snapshot(4)
+    assert result.state == snapshot(MyState(x=2, y='y'))
+    assert result.history == snapshot(
         [
             NodeStep(state=MyState(x=2, y=''), node=Foo(), start_ts=IsNow(tz=timezone.utc), duration=IsFloat()),
             NodeStep(state=MyState(x=2, y='y'), node=Bar(), start_ts=IsNow(tz=timezone.utc), duration=IsFloat()),
-            EndStep(result=End(4), ts=IsNow(tz=timezone.utc)),
+            EndStep(result=End(data=4), ts=IsNow(tz=timezone.utc)),
         ]
     )
-    history_json = graph.dump_history(history)
+    history_json = graph.dump_history(result.history)
     assert json.loads(history_json) == snapshot(
         [
             {
@@ -76,7 +77,7 @@ async def test_dump_load_history(graph: Graph[MyState, None, int]):
         ]
     )
     history_loaded = graph.load_history(history_json)
-    assert history == history_loaded
+    assert result.history == history_loaded
 
     custom_history = [
         {
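The same result object feeds the history round trip tested above: dump_history serializes the recorded steps to JSON, and load_history parses that JSON back into step objects that compare equal. A minimal self-contained sketch; the Answer node is invented for illustration, while the dump/load calls mirror the test:

import asyncio
from dataclasses import dataclass

from pydantic_graph import BaseNode, End, Graph, GraphRunContext


@dataclass
class Answer(BaseNode[None, None, int]):
    async def run(self, ctx: GraphRunContext) -> End[int]:
        return End(4)


graph = Graph(nodes=(Answer,))


async def main():
    result = await graph.run(Answer())
    # Serialize the recorded NodeStep/EndStep history to JSON for storage...
    history_json = graph.dump_history(result.history)
    # ...and load it back into equal step objects
    assert graph.load_history(history_json) == result.history


asyncio.run(main())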
{pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/test_mermaid.py

@@ -58,9 +58,9 @@ graph2 = Graph(nodes=(Spam, Foo, Bar, Eggs))
 
 
 async def test_run_graph():
-    result, history = await graph1.run(Foo())
-    assert result is None
-    assert history == snapshot(
+    result = await graph1.run(Foo())
+    assert result.output is None
+    assert result.history == snapshot(
         [
             NodeStep(
                 state=None,
{pydantic_ai-0.0.24 → pydantic_ai-0.0.26}/tests/graph/test_state.py

@@ -36,9 +36,9 @@ async def test_run_graph():
     assert graph._get_state_type() is MyState
     assert graph._get_run_end_type() is str
     state = MyState(1, '')
-    result, history = await graph.run(Foo(), state=state)
-    assert result == snapshot('x=2 y=y')
-    assert history == snapshot(
+    result = await graph.run(Foo(), state=state)
+    assert result.output == snapshot('x=2 y=y')
+    assert result.history == snapshot(
         [
             NodeStep(
                 state=MyState(x=2, y=''),
@@ -52,7 +52,7 @@ async def test_run_graph():
                 start_ts=IsNow(tz=timezone.utc),
                 duration=IsFloat(),
             ),
-            EndStep(result=End('x=2 y=y'), ts=IsNow(tz=timezone.utc)),
+            EndStep(result=End(data='x=2 y=y'), ts=IsNow(tz=timezone.utc)),
         ]
     )
     assert state == MyState(x=2, y='y')
pydantic_ai-0.0.26/tests/json_body_serializer.py

@@ -0,0 +1,77 @@
+# pyright: reportUnknownMemberType=false, reportUnknownVariableType=false
+import json
+from typing import TYPE_CHECKING, Any
+
+import yaml
+
+if TYPE_CHECKING:
+    from yaml import Dumper, Loader
+else:
+    try:
+        from yaml import CDumper as Dumper, CLoader as Loader
+    except ImportError:
+        from yaml import Dumper, Loader
+
+FILTERED_HEADER_PREFIXES = ['anthropic-', 'cf-', 'x-']
+FILTERED_HEADERS = {'authorization', 'date', 'request-id', 'server', 'user-agent', 'via', 'set-cookie'}
+
+
+class LiteralDumper(Dumper):
+    """
+    A custom dumper that will represent multi-line strings using literal style.
+    """
+
+
+def str_presenter(dumper: Dumper, data: str):
+    """If the string contains newlines, represent it as a literal block."""
+    if '\n' in data:
+        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
+    return dumper.represent_scalar('tag:yaml.org,2002:str', data)
+
+
+# Register the custom presenter on our dumper
+LiteralDumper.add_representer(str, str_presenter)
+
+
+def deserialize(cassette_string: str):
+    cassette_dict = yaml.load(cassette_string, Loader=Loader)
+    for interaction in cassette_dict['interactions']:
+        for kind, data in interaction.items():
+            parsed_body = data.pop('parsed_body', None)
+            if parsed_body is not None:
+                dumped_body = json.dumps(parsed_body)
+                data['body'] = {'string': dumped_body} if kind == 'response' else dumped_body
+    return cassette_dict
+
+
+def serialize(cassette_dict: Any):
+    for interaction in cassette_dict['interactions']:
+        for _kind, data in interaction.items():
+            headers: dict[str, list[str]] = data.get('headers', {})
+            # make headers lowercase
+            headers = {k.lower(): v for k, v in headers.items()}
+            # filter headers by name
+            headers = {k: v for k, v in headers.items() if k not in FILTERED_HEADERS}
+            # filter headers by prefix
+            headers = {
+                k: v for k, v in headers.items() if not any(k.startswith(prefix) for prefix in FILTERED_HEADER_PREFIXES)
+            }
+            # update headers on source object
+            data['headers'] = headers
+
+            content_type = headers.get('content-type', None)
+            if content_type != ['application/json']:
+                continue
+
+            # Parse the body as JSON
+            body: Any = data.get('body', None)
+            assert body is not None, data
+            if isinstance(body, dict):
+                # Responses will have the body under a field called 'string'
+                body = body.get('string')
+            if body is not None:
+                data['parsed_body'] = json.loads(body)
+                del data['body']
+
+    # Use our custom dumper
+    return yaml.dump(cassette_dict, Dumper=LiteralDumper, allow_unicode=True, width=120)
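To illustrate the new serializer end to end, here is a hypothetical round trip. The cassette content and the import path are made up for illustration; the serialize/deserialize behaviour (lowercased and filtered headers, JSON bodies stored as structured parsed_body mappings) comes from the file above.

from tests import json_body_serializer  # import path assumes the test package layout above

cassette = {
    'interactions': [
        {
            'request': {
                'headers': {'Content-Type': ['application/json'], 'X-Request-Id': ['abc']},
                'body': '{"prompt": "hello"}',
            },
            'response': {
                'headers': {'content-type': ['application/json'], 'date': ['today']},
                'body': {'string': '{"answer": "hi there"}'},
            },
        }
    ]
}

# Headers are lowercased, noisy ones (date, x-*, ...) are dropped, and JSON
# bodies become structured `parsed_body` mappings in the dumped YAML
yaml_text = json_body_serializer.serialize(cassette)

# deserialize re-serializes each parsed_body back into the body string VCR expects
restored = json_body_serializer.deserialize(yaml_text)
assert restored['interactions'][0]['request']['body'] == '{"prompt": "hello"}'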