pydantic-ai 0.0.36__tar.gz → 0.0.37__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydantic-ai has been flagged as potentially problematic.
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/PKG-INFO +3 -3
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/pyproject.toml +3 -3
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_anthropic.py +1 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_bedrock.py +1 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_cohere.py +1 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_fallback.py +1 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_gemini.py +1 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_groq.py +1 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_instrumented.py +12 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_mistral.py +1 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_openai.py +1 -1
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_vertexai.py +4 -4
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/providers/test_google_vertex.py +29 -2
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_agent.py +28 -1
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_examples.py +1 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_tools.py +22 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/.gitignore +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/LICENSE +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/Makefile +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/README.md +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/__init__.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/assets/kiwi.png +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/assets/marcelo.mp3 +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/conftest.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/example_modules/README.md +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/example_modules/bank_database.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/example_modules/fake_database.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/example_modules/weather_service.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/graph/__init__.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/graph/test_graph.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/graph/test_history.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/graph/test_mermaid.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/graph/test_state.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/graph/test_utils.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/import_examples.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/json_body_serializer.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/__init__.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/mock_async_stream.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_model.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_model_function.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_model_names.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_model_test.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/providers/__init__.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/providers/test_bedrock.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/providers/test_deepseek.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/providers/test_google_gla.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/providers/test_provider_names.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_cli.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_deps.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_format_as_xml.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_json_body_serializer.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_live.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_logfire.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_parts_manager.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_streaming.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_usage_limits.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_utils.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/typed_agent.py +0 -0
- {pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/typed_graph.py +0 -0
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.0.36
+Version: 0.0.37
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai

@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,groq,mistral,openai,vertexai]==0.0.36
+Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,groq,mistral,openai,vertexai]==0.0.37
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.0.36; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.0.37; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=2.3; extra == 'logfire'
 Description-Content-Type: text/markdown
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai"
-version = "0.0.36"
+version = "0.0.37"
 description = "Agent Framework / shim to use Pydantic with LLMs"
 authors = [
     { name = "Samuel Colvin", email = "samuel@pydantic.dev" },

@@ -36,7 +36,7 @@ classifiers = [
 ]
 requires-python = ">=3.9"
 dependencies = [
-    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli]==0.0.36",
+    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli]==0.0.37",
 ]
 
 [project.urls]

@@ -46,7 +46,7 @@ Documentation = "https://ai.pydantic.dev"
 Changelog = "https://github.com/pydantic/pydantic-ai/releases"
 
 [project.optional-dependencies]
-examples = ["pydantic-ai-examples==0.0.36"]
+examples = ["pydantic-ai-examples==0.0.37"]
 logfire = ["logfire>=2.3"]
 
 [tool.uv.sources]
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_bedrock.py

@@ -58,6 +58,7 @@ def bedrock_provider():
 
 async def test_bedrock_model(allow_model_requests: None, bedrock_provider: BedrockProvider):
     model = BedrockConverseModel('us.amazon.nova-micro-v1:0', provider=bedrock_provider)
+    assert model.base_url == 'https://bedrock-runtime.us-east-1.amazonaws.com'
     agent = Agent(model=model, system_prompt='You are a chatbot.')
 
     result = await agent.run('Hello!')
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_gemini.py

@@ -58,6 +58,7 @@ def test_api_key_arg(env: TestEnv):
     env.set('GEMINI_API_KEY', 'via-env-var')
     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
     assert m.client.headers['x-goog-api-key'] == 'via-arg'
+    assert m.base_url == 'https://generativelanguage.googleapis.com/v1beta/models/'
 
 
 def test_api_key_env_var(env: TestEnv):
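The additions in the model test files above all read a base_url property exposed on model classes in 0.0.37. A minimal sketch of using it, assuming the import paths and constructor arguments shown in the tests above (the API key is a placeholder; nothing is sent over the network just to read the property):

from pydantic_ai.models.gemini import GeminiModel
from pydantic_ai.providers.google_gla import GoogleGLAProvider

m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='dummy-key'))
# Per the test above, this resolves to the Generative Language API endpoint.
print(m.base_url)  # https://generativelanguage.googleapis.com/v1beta/models/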
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_instrumented.py

@@ -56,6 +56,10 @@ class MyModel(Model):
     def model_name(self) -> str:
         return 'my_model'
 
+    @property
+    def base_url(self) -> str:
+        return 'https://example.com:8000/foo'
+
     async def request(
         self,
         messages: list[ModelMessage],

@@ -146,6 +150,8 @@ async def test_instrumented_model(capfire: CaptureLogfire):
     'gen_ai.operation.name': 'chat',
     'gen_ai.system': 'my_system',
     'gen_ai.request.model': 'my_model',
+    'server.address': 'example.com',
+    'server.port': 8000,
     'gen_ai.request.temperature': 1,
     'logfire.msg': 'chat my_model',
     'logfire.span_type': 'span',

@@ -366,6 +372,8 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire):
     'gen_ai.operation.name': 'chat',
     'gen_ai.system': 'my_system',
     'gen_ai.request.model': 'my_model',
+    'server.address': 'example.com',
+    'server.port': 8000,
     'gen_ai.request.temperature': 1,
     'logfire.msg': 'chat my_model',
     'logfire.span_type': 'span',

@@ -447,6 +455,8 @@ async def test_instrumented_model_stream_break(capfire: CaptureLogfire):
     'gen_ai.operation.name': 'chat',
     'gen_ai.system': 'my_system',
     'gen_ai.request.model': 'my_model',
+    'server.address': 'example.com',
+    'server.port': 8000,
     'gen_ai.request.temperature': 1,
     'logfire.msg': 'chat my_model',
     'logfire.span_type': 'span',

@@ -547,6 +557,8 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire):
     'gen_ai.operation.name': 'chat',
     'gen_ai.system': 'my_system',
     'gen_ai.request.model': 'my_model',
+    'server.address': 'example.com',
+    'server.port': 8000,
     'gen_ai.request.temperature': 1,
     'logfire.msg': 'chat my_model',
     'logfire.span_type': 'span',
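The four instrumentation hunks above add server.address and server.port span attributes; the test model's base_url of 'https://example.com:8000/foo' shows up as address 'example.com' and port 8000. A rough sketch of that derivation, not necessarily how pydantic-ai implements it internally:

from urllib.parse import urlparse

def server_attributes(base_url: str) -> dict[str, object]:
    # Split a model's base_url into OpenTelemetry-style server.* attributes.
    parsed = urlparse(base_url)
    attributes: dict[str, object] = {}
    if parsed.hostname:
        attributes['server.address'] = parsed.hostname
    if parsed.port is not None:
        attributes['server.port'] = parsed.port
    return attributes

assert server_attributes('https://example.com:8000/foo') == {'server.address': 'example.com', 'server.port': 8000}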
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_openai.py

@@ -60,7 +60,7 @@ pytestmark = [
 
 def test_init():
     m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key='foobar'))
-    assert
+    assert m.base_url == 'https://api.openai.com/v1/'
     assert m.client.api_key == 'foobar'
     assert m.model_name == 'gpt-4o'
 
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/test_vertexai.py

@@ -38,7 +38,7 @@ async def test_init_service_account(tmp_path: Path, allow_model_requests: None):
 
     await model.ainit()
 
-    assert model.
+    assert model.base_url == snapshot(
         'https://us-central1-aiplatform.googleapis.com/v1/projects/my-project-id/locations/us-central1/'
         'publishers/google/models/gemini-1.5-flash:'
     )

@@ -66,7 +66,7 @@ async def test_init_env(mocker: MockerFixture, allow_model_requests: None):
 
     assert patch.call_count == 1
 
-    assert model.
+    assert model.base_url == snapshot(
         'https://us-central1-aiplatform.googleapis.com/v1/projects/my-project-id/locations/us-central1/'
         'publishers/google/models/gemini-1.5-flash:'
     )

@@ -75,7 +75,7 @@ async def test_init_env(mocker: MockerFixture, allow_model_requests: None):
     assert model.system == snapshot('google-vertex')
 
     await model.ainit()
-    assert model.
+    assert model.base_url is not None
     assert model.auth is not None
     assert patch.call_count == 1
 

@@ -90,7 +90,7 @@ async def test_init_right_project_id(tmp_path: Path, allow_model_requests: None):
 
     await model.ainit()
 
-    assert model.
+    assert model.base_url == snapshot(
         'https://us-central1-aiplatform.googleapis.com/v1/projects/my-project-id/locations/us-central1/'
         'publishers/google/models/gemini-1.5-flash:'
     )
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/providers/test_google_vertex.py

@@ -74,8 +74,31 @@ async def test_google_vertex_provider_service_account_file(
     assert getattr(provider.client.auth, 'token_created') is not None
 
 
-def save_service_account(service_account_path: Path, project_id: str) -> None:
-    service_account = {
+async def test_google_vertex_provider_service_account_file_info(
+    monkeypatch: pytest.MonkeyPatch, allow_model_requests: None
+):
+    account_info = prepare_service_account_contents('my-project-id')
+
+    provider = GoogleVertexProvider(service_account_info=account_info)
+    monkeypatch.setattr(provider.client.auth, '_refresh_token', lambda: 'my-token')
+    await provider.client.post('/gemini-1.0-pro:generateContent')
+    assert provider.region == 'us-central1'
+    assert getattr(provider.client.auth, 'project_id') == 'my-project-id'
+    assert getattr(provider.client.auth, 'token_created') is not None
+
+
+async def test_google_vertex_provider_service_account_xor(allow_model_requests: None):
+    with pytest.raises(
+        ValueError, match='Only one of `service_account_file` or `service_account_info` can be provided'
+    ):
+        GoogleVertexProvider(  # type: ignore[reportCallIssue]
+            service_account_file='path/to/service-account.json',
+            service_account_info=prepare_service_account_contents('my-project-id'),
+        )
+
+
+def prepare_service_account_contents(project_id: str) -> dict[str, str]:
+    return {
         'type': 'service_account',
         'project_id': project_id,
         'private_key_id': 'abc',

@@ -107,4 +130,8 @@ def save_service_account(service_account_path: Path, project_id: str) -> None:
         'universe_domain': 'googleapis.com',
     }
 
+
+def save_service_account(service_account_path: Path, project_id: str) -> None:
+    service_account = prepare_service_account_contents(project_id)
+
     service_account_path.write_text(json.dumps(service_account, indent=2))
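The provider tests above introduce service_account_info as an in-memory alternative to service_account_file, and assert that passing both raises a ValueError. A minimal usage sketch; the import path and the local 'service-account.json' file are assumptions, only the constructor keywords come from the tests:

import json

from pydantic_ai.providers.google_vertex import GoogleVertexProvider

# Build the service-account dict however you like; the tests construct it in memory.
with open('service-account.json') as f:
    account_info = json.load(f)

provider = GoogleVertexProvider(service_account_info=account_info)

# Passing both sources is rejected:
#   GoogleVertexProvider(service_account_file='a.json', service_account_info=account_info)
#   -> ValueError: Only one of `service_account_file` or `service_account_info` can be provided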
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_agent.py

@@ -13,6 +13,7 @@ from pydantic_core import to_json
 
 from pydantic_ai import Agent, ModelRetry, RunContext, UnexpectedModelBehavior, UserError, capture_run_messages
 from pydantic_ai.messages import (
+    BinaryContent,
     ModelMessage,
     ModelRequest,
     ModelResponse,

@@ -29,7 +30,7 @@ from pydantic_ai.models.test import TestModel
 from pydantic_ai.result import Usage
 from pydantic_ai.tools import ToolDefinition
 
-from .conftest import IsNow, TestEnv
+from .conftest import IsNow, IsStr, TestEnv
 
 pytestmark = pytest.mark.anyio
 

@@ -1572,3 +1573,29 @@ def test_custom_result_type_invalid() -> None:
 
     with pytest.raises(UserError, match='Cannot set a custom run `result_type` when the agent has result validators'):
         agent.run_sync('Hello', result_type=int)
+
+
+def test_binary_content_all_messages_json():
+    agent = Agent('test')
+
+    result = agent.run_sync(['Hello', BinaryContent(data=b'Hello', media_type='text/plain')])
+    assert json.loads(result.all_messages_json()) == snapshot(
+        [
+            {
+                'parts': [
+                    {
+                        'content': ['Hello', {'data': 'SGVsbG8=', 'media_type': 'text/plain', 'kind': 'binary'}],
+                        'timestamp': IsStr(),
+                        'part_kind': 'user-prompt',
+                    }
+                ],
+                'kind': 'request',
+            },
+            {
+                'parts': [{'content': 'success (no tool calls)', 'part_kind': 'text'}],
+                'model_name': 'test',
+                'timestamp': IsStr(),
+                'kind': 'response',
+            },
+        ]
+    )
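test_binary_content_all_messages_json above passes BinaryContent alongside text in a user prompt and round-trips the messages through all_messages_json. A stripped-down version of the same flow, using the built-in 'test' model as the test does:

import json

from pydantic_ai import Agent
from pydantic_ai.messages import BinaryContent

agent = Agent('test')
result = agent.run_sync(['Hello', BinaryContent(data=b'Hello', media_type='text/plain')])

messages = json.loads(result.all_messages_json())
# The binary payload is base64-encoded in the JSON dump ('SGVsbG8=' for b'Hello').
print(messages[0]['parts'][0]['content'])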
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_examples.py

@@ -188,6 +188,7 @@ text_responses: dict[str, str | ToolCallPart] = {
         tool_name='weather_forecast', args={'location': 'Paris', 'forecast_date': '2030-01-01'}, tool_call_id='0001'
     ),
     'Tell me a joke.': 'Did you hear about the toothpaste scandal? They called it Colgate.',
+    'Tell me a different joke.': 'No.',
     'Explain?': 'This is an excellent joke invented by Samuel Colvin, it needs no explanation.',
     'What is the capital of France?': 'Paris',
     'What is the capital of Italy?': 'Rome',
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/test_tools.py

@@ -500,6 +500,28 @@ def test_dynamic_tool_decorator():
     assert r.data == snapshot('success (no tool calls)')
 
 
+def test_plain_tool_name():
+    agent = Agent(FunctionModel(get_json_schema))
+
+    def my_tool(arg: str) -> str: ...
+
+    agent.tool_plain(name='foo_tool')(my_tool)
+    result = agent.run_sync('Hello')
+    json_schema = json.loads(result.data)
+    assert json_schema['name'] == 'foo_tool'
+
+
+def test_tool_name():
+    agent = Agent(FunctionModel(get_json_schema))
+
+    def my_tool(ctx: RunContext, arg: str) -> str: ...
+
+    agent.tool(name='foo_tool')(my_tool)
+    result = agent.run_sync('Hello')
+    json_schema = json.loads(result.data)
+    assert json_schema['name'] == 'foo_tool'
+
+
 def test_dynamic_tool_use_messages():
     async def repeat_call_foobar(_messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         if info.function_tools:
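The two new tool tests register a function under an explicit name via agent.tool_plain(name=...) and agent.tool(name=...). A decorator-style sketch of the same idea; only the name keyword is taken from the tests, the tool bodies and names here are illustrative:

from pydantic_ai import Agent, RunContext

agent = Agent('test')

@agent.tool_plain(name='foo_tool')
def my_tool(arg: str) -> str:
    # Registered with the model as 'foo_tool' rather than 'my_tool'.
    return arg.upper()

@agent.tool(name='bar_tool')
def my_other_tool(ctx: RunContext, arg: str) -> str:
    # Tools that take RunContext go through agent.tool instead of agent.tool_plain.
    return arg.lower()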
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{pydantic_ai-0.0.36 → pydantic_ai-0.0.37}/tests/models/cassettes/test_groq/test_image_url_input.yaml
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|