pydantic-ai 0.0.31__tar.gz → 0.0.33__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/PKG-INFO +4 -4
  2. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/pyproject.toml +12 -4
  3. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/conftest.py +1 -0
  4. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/json_body_serializer.py +19 -13
  5. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +3 -3
  6. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +9 -9
  7. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_gemini.py +29 -29
  8. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_instrumented.py +21 -5
  9. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_model.py +2 -2
  10. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_model_names.py +11 -2
  11. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_openai.py +24 -21
  12. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_vertexai.py +3 -0
  13. pydantic_ai-0.0.33/tests/providers/__init__.py +0 -0
  14. pydantic_ai-0.0.33/tests/providers/test_deepseek.py +48 -0
  15. pydantic_ai-0.0.33/tests/providers/test_google_gla.py +19 -0
  16. pydantic_ai-0.0.33/tests/providers/test_google_vertex.py +110 -0
  17. pydantic_ai-0.0.33/tests/providers/test_provider_names.py +44 -0
  18. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_examples.py +5 -3
  19. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_live.py +14 -5
  20. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_logfire.py +58 -56
  21. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/.gitignore +0 -0
  22. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/LICENSE +0 -0
  23. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/Makefile +0 -0
  24. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/README.md +0 -0
  25. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/__init__.py +0 -0
  26. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/assets/kiwi.png +0 -0
  27. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/assets/marcelo.mp3 +0 -0
  28. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/example_modules/README.md +0 -0
  29. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/example_modules/bank_database.py +0 -0
  30. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/example_modules/fake_database.py +0 -0
  31. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/example_modules/weather_service.py +0 -0
  32. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/graph/__init__.py +0 -0
  33. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/graph/test_graph.py +0 -0
  34. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/graph/test_history.py +0 -0
  35. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/graph/test_mermaid.py +0 -0
  36. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/graph/test_state.py +0 -0
  37. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/graph/test_utils.py +0 -0
  38. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/import_examples.py +0 -0
  39. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/__init__.py +0 -0
  40. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
  41. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
  42. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
  43. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
  44. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
  45. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
  46. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
  47. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
  48. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
  49. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/mock_async_stream.py +0 -0
  50. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_anthropic.py +0 -0
  51. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_cohere.py +0 -0
  52. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_fallback.py +0 -0
  53. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_groq.py +0 -0
  54. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_mistral.py +0 -0
  55. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_model_function.py +0 -0
  56. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_model_test.py +0 -0
  57. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_agent.py +0 -0
  58. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_deps.py +0 -0
  59. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_format_as_xml.py +0 -0
  60. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_json_body_serializer.py +0 -0
  61. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_parts_manager.py +0 -0
  62. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_streaming.py +0 -0
  63. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_tools.py +0 -0
  64. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_usage_limits.py +0 -0
  65. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/test_utils.py +0 -0
  66. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/typed_agent.py +0 -0
  67. {pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/typed_graph.py +0 -0
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/PKG-INFO

@@ -1,12 +1,12 @@
  Metadata-Version: 2.4
  Name: pydantic-ai
- Version: 0.0.31
+ Version: 0.0.33
  Summary: Agent Framework / shim to use Pydantic with LLMs
  Project-URL: Homepage, https://ai.pydantic.dev
  Project-URL: Source, https://github.com/pydantic/pydantic-ai
  Project-URL: Documentation, https://ai.pydantic.dev
  Project-URL: Changelog, https://github.com/pydantic/pydantic-ai/releases
- Author-email: Samuel Colvin <samuel@pydantic.dev>
+ Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
  License-Expression: MIT
  License-File: LICENSE
  Classifier: Development Status :: 4 - Beta
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.9
- Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.31
+ Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.33
  Provides-Extra: examples
- Requires-Dist: pydantic-ai-examples==0.0.31; extra == 'examples'
+ Requires-Dist: pydantic-ai-examples==0.0.33; extra == 'examples'
  Provides-Extra: logfire
  Requires-Dist: logfire>=2.3; extra == 'logfire'
  Description-Content-Type: text/markdown
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/pyproject.toml

@@ -4,9 +4,14 @@ build-backend = "hatchling.build"

  [project]
  name = "pydantic-ai"
- version = "0.0.31"
+ version = "0.0.33"
  description = "Agent Framework / shim to use Pydantic with LLMs"
- authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
+ authors = [
+     { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
+     { name = "Marcelo Trylesinski", email = "marcelotryle@gmail.com" },
+     { name = "David Montague", email = "david@pydantic.dev" },
+     { name = "Alex Hall", email = "alex@pydantic.dev" },
+ ]
  license = "MIT"
  readme = "README.md"
  classifiers = [
@@ -32,7 +37,7 @@ classifiers = [
  requires-python = ">=3.9"

  dependencies = [
-     "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.31",
+     "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.33",
  ]

  [project.urls]
@@ -42,7 +47,7 @@ Documentation = "https://ai.pydantic.dev"
  Changelog = "https://github.com/pydantic/pydantic-ai/releases"

  [project.optional-dependencies]
- examples = ["pydantic-ai-examples==0.0.31"]
+ examples = ["pydantic-ai-examples==0.0.33"]
  logfire = ["logfire>=2.3"]

  [tool.uv.sources]
@@ -165,6 +170,9 @@ exclude_lines = [
  [tool.logfire]
  ignore_no_config = true

+ [tool.inline-snapshot]
+ format-command="ruff format --stdin-filename {filename}"
+
  [tool.inline-snapshot.shortcuts]
  snap-fix = ["create", "fix"]
  snap = ["create"]
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/conftest.py

@@ -200,6 +200,7 @@ def pytest_recording_configure(config: Any, vcr: VCR):
  @pytest.fixture(scope='module')
  def vcr_config():
      return {
+         'ignore_localhost': True,
          # Note: additional header filtering is done inside the serializer
          'filter_headers': ['authorization', 'x-api-key'],
          'decode_compressed_response': True,
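
The new 'ignore_localhost' key is a standard vcrpy option: requests to localhost bypass the cassette instead of being recorded, which keeps locally-mocked servers out of the fixtures. For context, this is roughly how pytest-recording consumes the fixture (a sketch; the test body is hypothetical):

import pytest


@pytest.mark.vcr()
def test_example():
    # With 'ignore_localhost': True in vcr_config, HTTP calls to
    # 127.0.0.1/localhost inside this test hit the network directly,
    # while external calls are still recorded and replayed as before.
    ...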
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/json_body_serializer.py

@@ -1,5 +1,6 @@
  # pyright: reportUnknownMemberType=false, reportUnknownVariableType=false
  import json
+ import urllib.parse
  from typing import TYPE_CHECKING, Any

  import yaml
@@ -59,19 +60,24 @@ def serialize(cassette_dict: Any):
          # update headers on source object
          data['headers'] = headers

-         content_type = headers.get('content-type', None)
-         if content_type != ['application/json']:
-             continue
-
-         # Parse the body as JSON
-         body: Any = data.get('body', None)
-         assert body is not None, data
-         if isinstance(body, dict):
-             # Responses will have the body under a field called 'string'
-             body = body.get('string')
-         if body is not None:
-             data['parsed_body'] = json.loads(body)
-         del data['body']
+         content_type = headers.get('content-type', [])
+         if any(header.startswith('application/json') for header in content_type):
+             # Parse the body as JSON
+             body: Any = data.get('body', None)
+             assert body is not None, data
+             if isinstance(body, dict):
+                 # Responses will have the body under a field called 'string'
+                 body = body.get('string')
+             if body is not None:
+                 data['parsed_body'] = json.loads(body)
+                 if 'access_token' in data['parsed_body']:
+                     data['parsed_body']['access_token'] = 'scrubbed'
+             del data['body']
+         if content_type == ['application/x-www-form-urlencoded']:
+             query_params = urllib.parse.parse_qs(data['body'])
+             if 'client_secret' in query_params:
+                 query_params['client_secret'] = ['scrubbed']
+             data['body'] = urllib.parse.urlencode(query_params)

      # Use our custom dumper
      return yaml.dump(cassette_dict, Dumper=LiteralDumper, allow_unicode=True, width=120)
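
The serializer now handles two content types instead of bailing out on anything that is not exactly application/json: JSON bodies are still parsed into 'parsed_body', with any 'access_token' value replaced by 'scrubbed', and form-encoded bodies get 'client_secret' scrubbed before being re-encoded. A standalone sketch of the form-encoding branch (the function name and sample data are hypothetical; doseq=True is used here so the list values produced by parse_qs round-trip readably):

import urllib.parse


def scrub_client_secret(body: str) -> str:
    # parse_qs returns list-valued params, e.g. {'client_secret': ['s3cr3t']}
    query_params = urllib.parse.parse_qs(body)
    if 'client_secret' in query_params:
        query_params['client_secret'] = ['scrubbed']
    return urllib.parse.urlencode(query_params, doseq=True)


print(scrub_client_secret('grant_type=refresh_token&client_secret=s3cr3t'))
# -> grant_type=refresh_token&client_secret=scrubbed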
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml

@@ -38,7 +38,7 @@ interactions:
            "role": "model"
          },
          "finishReason": "STOP",
-         "avgLogprobs": -0.02894418438275655
+         "avgLogprobs": -0.031536102294921875
        }
      ],
      "usageMetadata": {
@@ -68,11 +68,11 @@ interactions:
      alt-svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      content-length:
-     - '710'
+     - '711'
      content-type:
      - application/json; charset=UTF-8
      server-timing:
-     - gfet4t7; dur=2776
+     - gfet4t7; dur=2657
      transfer-encoding:
      - chunked
      vary:
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/cassettes/test_gemini/test_image_url_input.yaml

@@ -6793,7 +6793,7 @@
      access-control-allow-origin:
      - '*'
      age:
-     - '42504'
+     - '88'
      alt-svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      cache-control:
@@ -6809,7 +6809,7 @@
      cross-origin-resource-policy:
      - cross-origin
      expires:
-     - Wed, 25 Feb 2026 21:07:23 GMT
+     - Wed, 04 Mar 2026 10:32:03 GMT
      last-modified:
      - Mon, 30 Aug 2123 17:01:05 GMT
      report-to:
@@ -6850,19 +6850,19 @@
          "content": {
            "parts": [
              {
-               "text": "This is not a fruit, it is an organ console."
+               "text": "This is not a fruit; it's a pipe organ console."
              }
            ],
            "role": "model"
          },
          "finishReason": "STOP",
-         "avgLogprobs": -0.4544379711151123
+         "avgLogprobs": -0.31288215092250277
        }
      ],
      "usageMetadata": {
        "promptTokenCount": 1814,
-       "candidatesTokenCount": 12,
-       "totalTokenCount": 1826,
+       "candidatesTokenCount": 14,
+       "totalTokenCount": 1828,
        "promptTokensDetails": [
          {
            "modality": "TEXT",
@@ -6876,7 +6876,7 @@
        "candidatesTokensDetails": [
          {
            "modality": "TEXT",
-           "tokenCount": 12
+           "tokenCount": 14
          }
        ]
      },
@@ -6886,11 +6886,11 @@
      alt-svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      content-length:
-     - '726'
+     - '730'
      content-type:
      - application/json; charset=UTF-8
      server-timing:
-     - gfet4t7; dur=6687
+     - gfet4t7; dur=1749
      transfer-encoding:
      - chunked
      vary:
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_gemini.py

@@ -45,6 +45,7 @@ from pydantic_ai.models.gemini import (
      _GeminiTools,
      _GeminiUsageMetaData,
  )
+ from pydantic_ai.providers.google_gla import GoogleGLAProvider
  from pydantic_ai.result import Usage
  from pydantic_ai.tools import ToolDefinition

@@ -55,9 +56,8 @@ pytestmark = pytest.mark.anyio

  def test_api_key_arg(env: TestEnv):
      env.set('GEMINI_API_KEY', 'via-env-var')
-     m = GeminiModel('gemini-1.5-flash', api_key='via-arg')
-     assert isinstance(m.auth, ApiKeyAuth)
-     assert m.auth.api_key == 'via-arg'
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
+     assert m.client.headers['x-goog-api-key'] == 'via-arg'


  def test_api_key_env_var(env: TestEnv):
@@ -80,11 +80,10 @@ def test_api_key_empty(env: TestEnv):


  async def test_model_simple(allow_model_requests: None):
-     m = GeminiModel('gemini-1.5-flash', api_key='via-arg')
-     assert isinstance(m.http_client, httpx.AsyncClient)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
+     assert isinstance(m.client, httpx.AsyncClient)
      assert m.model_name == 'gemini-1.5-flash'
-     assert isinstance(m.auth, ApiKeyAuth)
-     assert m.auth.api_key == 'via-arg'
+     assert 'x-goog-api-key' in m.client.headers

      arc = ModelRequestParameters(function_tools=[], allow_text_result=True, result_tools=[])
      tools = m._get_tools(arc)
@@ -94,7 +93,7 @@ async def test_model_simple(allow_model_requests: None):


  async def test_model_tools(allow_model_requests: None):
-     m = GeminiModel('gemini-1.5-flash', api_key='via-arg')
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
      tools = [
          ToolDefinition(
              'foo',
@@ -153,7 +152,7 @@ async def test_model_tools(allow_model_requests: None):


  async def test_require_response_tool(allow_model_requests: None):
-     m = GeminiModel('gemini-1.5-flash', api_key='via-arg')
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
      result_tool = ToolDefinition(
          'result',
          'This is the tool for the final Result',
@@ -212,7 +211,7 @@ async def test_json_def_replaced(allow_model_requests: None):
          }
      )

-     m = GeminiModel('gemini-1.5-flash', api_key='via-arg')
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
      result_tool = ToolDefinition(
          'result',
          'This is the tool for the final Result',
@@ -259,7 +258,7 @@ async def test_json_def_replaced_any_of(allow_model_requests: None):

      json_schema = Locations.model_json_schema()

-     m = GeminiModel('gemini-1.5-flash', api_key='via-arg')
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
      result_tool = ToolDefinition(
          'result',
          'This is the tool for the final Result',
@@ -322,7 +321,7 @@ async def test_json_def_recursive(allow_model_requests: None):
          }
      )

-     m = GeminiModel('gemini-1.5-flash', api_key='via-arg')
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
      result_tool = ToolDefinition(
          'result',
          'This is the tool for the final Result',
@@ -354,7 +353,7 @@ async def test_json_def_date(allow_model_requests: None):
          }
      )

-     m = GeminiModel('gemini-1.5-flash', api_key='via-arg')
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key='via-arg'))
      result_tool = ToolDefinition(
          'result',
          'This is the tool for the final Result',
@@ -451,7 +450,7 @@ def example_usage() -> _GeminiUsageMetaData:
  async def test_text_success(get_gemini_client: GetGeminiClient):
      response = gemini_response(_content_model_response(ModelResponse(parts=[TextPart('Hello world')])))
      gemini_client = get_gemini_client(response)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(m)

      result = await agent.run('Hello')
@@ -493,7 +492,7 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient):
          _content_model_response(ModelResponse(parts=[ToolCallPart('final_result', {'response': [1, 2, 123]})]))
      )
      gemini_client = get_gemini_client(response)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(m, result_type=list[int])

      result = await agent.run('Hello')
@@ -540,7 +539,7 @@ async def test_request_tool_call(get_gemini_client: GetGeminiClient):
          gemini_response(_content_model_response(ModelResponse(parts=[TextPart('final response')]))),
      ]
      gemini_client = get_gemini_client(responses)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(m, system_prompt='this is the system prompt')

      @agent.tool_plain
@@ -622,7 +621,7 @@ async def test_unexpected_response(client_with_handler: ClientWithHandler, env:
          return httpx.Response(401, content='invalid request')

      gemini_client = client_with_handler(handler)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(m, system_prompt='this is the system prompt')

      with pytest.raises(ModelHTTPError) as exc_info:
@@ -639,7 +638,7 @@ async def test_stream_text(get_gemini_client: GetGeminiClient):
      json_data = _gemini_streamed_response_ta.dump_json(responses, by_alias=True)
      stream = AsyncByteStreamList([json_data[:100], json_data[100:200], json_data[200:]])
      gemini_client = get_gemini_client(stream)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(m)

      async with agent.run_stream('Hello') as result:
@@ -684,7 +683,7 @@ async def test_stream_invalid_unicode_text(get_gemini_client: GetGeminiClient):

      stream = AsyncByteStreamList(parts)
      gemini_client = get_gemini_client(stream)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(m)

      async with agent.run_stream('Hello') as result:
@@ -698,7 +697,7 @@ async def test_stream_text_no_data(get_gemini_client: GetGeminiClient):
      json_data = _gemini_streamed_response_ta.dump_json(responses, by_alias=True)
      stream = AsyncByteStreamList([json_data[:100], json_data[100:200], json_data[200:]])
      gemini_client = get_gemini_client(stream)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(m)
      with pytest.raises(UnexpectedModelBehavior, match='Streamed response ended without con'):
          async with agent.run_stream('Hello'):
@@ -714,7 +713,7 @@ async def test_stream_structured(get_gemini_client: GetGeminiClient):
      json_data = _gemini_streamed_response_ta.dump_json(responses, by_alias=True)
      stream = AsyncByteStreamList([json_data[:100], json_data[100:200], json_data[200:]])
      gemini_client = get_gemini_client(stream)
-     model = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     model = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(model, result_type=tuple[int, int])

      async with agent.run_stream('Hello') as result:
@@ -744,7 +743,7 @@ async def test_stream_structured_tool_calls(get_gemini_client: GetGeminiClient):
      second_stream = AsyncByteStreamList([d2[:100], d2[100:]])

      gemini_client = get_gemini_client([first_stream, second_stream])
-     model = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     model = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(model, result_type=tuple[int, int])
      tool_calls: list[str] = []

@@ -817,7 +816,7 @@ async def test_stream_text_heterogeneous(get_gemini_client: GetGeminiClient):
      json_data = _gemini_streamed_response_ta.dump_json(responses, by_alias=True)
      stream = AsyncByteStreamList([json_data[:100], json_data[100:200], json_data[200:]])
      gemini_client = get_gemini_client(stream)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client)
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client))
      agent = Agent(m)

      @agent.tool_plain()
@@ -887,7 +886,7 @@ async def test_model_settings(client_with_handler: ClientWithHandler, env: TestE
      )

      gemini_client = client_with_handler(handler)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client, api_key='mock')
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client, api_key='mock'))
      agent = Agent(m)

      result = await agent.run(
@@ -939,7 +938,8 @@ async def test_safety_settings_unsafe(
      )

      gemini_client = client_with_handler(handler)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client, api_key='mock')
+
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client, api_key='mock'))
      agent = Agent(m)

      await agent.run(
@@ -975,7 +975,7 @@ async def test_safety_settings_safe(
      )

      gemini_client = client_with_handler(handler)
-     m = GeminiModel('gemini-1.5-flash', http_client=gemini_client, api_key='mock')
+     m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(http_client=gemini_client, api_key='mock'))
      agent = Agent(m)

      result = await agent.run(
@@ -994,7 +994,7 @@ async def test_safety_settings_safe(
  async def test_image_as_binary_content_input(
      allow_model_requests: None, gemini_api_key: str, image_content: BinaryContent
  ) -> None:
-     m = GeminiModel('gemini-2.0-flash', api_key=gemini_api_key)
+     m = GeminiModel('gemini-2.0-flash', provider=GoogleGLAProvider(api_key=gemini_api_key))
      agent = Agent(m)

      result = await agent.run(['What is the name of this fruit?', image_content])
@@ -1003,10 +1003,10 @@ async def test_image_as_binary_content_input(

  @pytest.mark.vcr()
  async def test_image_url_input(allow_model_requests: None, gemini_api_key: str) -> None:
-     m = GeminiModel('gemini-2.0-flash-exp', api_key=gemini_api_key)
+     m = GeminiModel('gemini-2.0-flash-exp', provider=GoogleGLAProvider(api_key=gemini_api_key))
      agent = Agent(m)

      image_url = ImageUrl(url='https://goo.gle/instrument-img')

      result = await agent.run(['What is the name of this fruit?', image_url])
-     assert result.data == snapshot('This is not a fruit, it is an organ console.')
+     assert result.data == snapshot("This is not a fruit; it's a pipe organ console.")
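
Every GeminiModel(..., api_key=...) and GeminiModel(..., http_client=...) call above migrates to a single provider argument: the API key and HTTP client now live on GoogleGLAProvider, and the configured client is exposed as m.client. A sketch of the new construction pattern implied by these tests (the key value is a placeholder):

import httpx

from pydantic_ai import Agent
from pydantic_ai.models.gemini import GeminiModel
from pydantic_ai.providers.google_gla import GoogleGLAProvider

# api_key and http_client both move onto the provider
model = GeminiModel(
    'gemini-1.5-flash',
    provider=GoogleGLAProvider(api_key='your-api-key', http_client=httpx.AsyncClient()),
)
agent = Agent(model)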
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_instrumented.py

@@ -106,7 +106,7 @@ class MyResponseStream(StreamedResponse):
  @pytest.mark.anyio
  @requires_logfire_events
  async def test_instrumented_model(capfire: CaptureLogfire):
-     model = InstrumentedModel.from_logfire(MyModel(), event_mode='logs')
+     model = InstrumentedModel(MyModel(), event_mode='logs')
      assert model.system == 'my_system'
      assert model.model_name == 'my_model'

@@ -168,6 +168,7 @@ async def test_instrumented_model(capfire: CaptureLogfire):
                  'severity_text': None,
                  'attributes': {
                      'gen_ai.system': 'my_system',
+                     'gen_ai.message.index': 0,
                      'event.name': 'gen_ai.system.message',
                  },
                  'timestamp': 2000000000,
@@ -182,6 +183,7 @@ async def test_instrumented_model(capfire: CaptureLogfire):
                  'severity_text': None,
                  'attributes': {
                      'gen_ai.system': 'my_system',
+                     'gen_ai.message.index': 0,
                      'event.name': 'gen_ai.user.message',
                  },
                  'timestamp': 4000000000,
@@ -196,6 +198,7 @@ async def test_instrumented_model(capfire: CaptureLogfire):
                  'severity_text': None,
                  'attributes': {
                      'gen_ai.system': 'my_system',
+                     'gen_ai.message.index': 0,
                      'event.name': 'gen_ai.tool.message',
                  },
                  'timestamp': 6000000000,
@@ -218,6 +221,7 @@ Fix the errors and try again.\
                  'severity_text': None,
                  'attributes': {
                      'gen_ai.system': 'my_system',
+                     'gen_ai.message.index': 0,
                      'event.name': 'gen_ai.tool.message',
                  },
                  'timestamp': 8000000000,
@@ -239,6 +243,7 @@ Fix the errors and try again.\
                  'severity_text': None,
                  'attributes': {
                      'gen_ai.system': 'my_system',
+                     'gen_ai.message.index': 0,
                      'event.name': 'gen_ai.user.message',
                  },
                  'timestamp': 10000000000,
@@ -253,6 +258,7 @@ Fix the errors and try again.\
                  'severity_text': None,
                  'attributes': {
                      'gen_ai.system': 'my_system',
+                     'gen_ai.message.index': 1,
                      'event.name': 'gen_ai.assistant.message',
                  },
                  'timestamp': 12000000000,
@@ -324,7 +330,7 @@ async def test_instrumented_model_not_recording():
  @pytest.mark.anyio
  @requires_logfire_events
  async def test_instrumented_model_stream(capfire: CaptureLogfire):
-     model = InstrumentedModel.from_logfire(MyModel(), event_mode='logs')
+     model = InstrumentedModel(MyModel(), event_mode='logs')

      messages: list[ModelMessage] = [
          ModelRequest(
@@ -380,6 +386,7 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire):
                  'severity_text': None,
                  'attributes': {
                      'gen_ai.system': 'my_system',
+                     'gen_ai.message.index': 0,
                      'event.name': 'gen_ai.user.message',
                  },
                  'timestamp': 2000000000,
@@ -406,7 +413,7 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire):
  @pytest.mark.anyio
  @requires_logfire_events
  async def test_instrumented_model_stream_break(capfire: CaptureLogfire):
-     model = InstrumentedModel.from_logfire(MyModel(), event_mode='logs')
+     model = InstrumentedModel(MyModel(), event_mode='logs')

      messages: list[ModelMessage] = [
          ModelRequest(
@@ -474,6 +481,7 @@ async def test_instrumented_model_stream_break(capfire: CaptureLogfire):
                  'severity_text': None,
                  'attributes': {
                      'gen_ai.system': 'my_system',
+                     'gen_ai.message.index': 0,
                      'event.name': 'gen_ai.user.message',
                  },
                  'timestamp': 2000000000,
@@ -555,12 +563,14 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire):
                  'event.name': 'gen_ai.system.message',
                  'content': 'system_prompt',
                  'role': 'system',
+                 'gen_ai.message.index': 0,
                  'gen_ai.system': 'my_system',
              },
              {
                  'event.name': 'gen_ai.user.message',
                  'content': 'user_prompt',
                  'role': 'user',
+                 'gen_ai.message.index': 0,
                  'gen_ai.system': 'my_system',
              },
              {
@@ -568,6 +578,7 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire):
                  'content': 'tool_return_content',
                  'role': 'tool',
                  'id': 'tool_call_3',
+                 'gen_ai.message.index': 0,
                  'gen_ai.system': 'my_system',
              },
              {
@@ -579,6 +590,7 @@ Fix the errors and try again.\
  """,
                  'role': 'tool',
                  'id': 'tool_call_4',
+                 'gen_ai.message.index': 0,
                  'gen_ai.system': 'my_system',
              },
              {
@@ -589,12 +601,14 @@ retry_prompt2
  Fix the errors and try again.\
  """,
                  'role': 'user',
+                 'gen_ai.message.index': 0,
                  'gen_ai.system': 'my_system',
              },
              {
                  'event.name': 'gen_ai.assistant.message',
                  'role': 'assistant',
                  'content': 'text3',
+                 'gen_ai.message.index': 1,
                  'gen_ai.system': 'my_system',
              },
              {
@@ -641,7 +655,7 @@ def test_messages_to_otel_events_serialization_errors():

      class Bar:
          def __repr__(self):
-             raise ValueError
+             raise ValueError('error!')

      messages = [
          ModelResponse(parts=[ToolCallPart('tool', {'arg': Foo()})]),
@@ -654,10 +668,12 @@ def test_messages_to_otel_events_serialization_errors():
      [
          {
              'body': "{'role': 'assistant', 'tool_calls': [{'id': None, 'type': 'function', 'function': {'name': 'tool', 'arguments': {'arg': Foo()}}}]}",
+             'gen_ai.message.index': 0,
              'event.name': 'gen_ai.assistant.message',
          },
          {
-             'body': 'Unable to serialize event body',
+             'body': 'Unable to serialize: error!',
+             'gen_ai.message.index': 1,
              'event.name': 'gen_ai.tool.message',
          },
      ]
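
Two changes are visible in this file: InstrumentedModel is now constructed directly rather than via the removed from_logfire classmethod, and every emitted OTel event carries a gen_ai.message.index attribute recording which message in the conversation it belongs to. A sketch of the new construction (import paths assumed from the test's context; TestModel is used purely for illustration):

from pydantic_ai.models.instrumented import InstrumentedModel
from pydantic_ai.models.test import TestModel

# previously: InstrumentedModel.from_logfire(TestModel(), event_mode='logs')
model = InstrumentedModel(TestModel(), event_mode='logs')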
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_model.py

@@ -19,7 +19,7 @@ TEST_CASES = [
          'gemini-1.5-flash',
          'google-vertex',
          'vertexai',
-         'VertexAIModel',
+         'GeminiModel',
      ),
      (
          'GEMINI_API_KEY',
@@ -27,7 +27,7 @@ TEST_CASES = [
          'gemini-1.5-flash',
          'google-vertex',
          'vertexai',
-         'VertexAIModel',
+         'GeminiModel',
      ),
      (
          'ANTHROPIC_API_KEY',
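
The expected class name flips from VertexAIModel to GeminiModel because the google-vertex / vertexai cases now resolve to a GeminiModel backed by a Vertex provider rather than a dedicated VertexAIModel class. Roughly what these test cases now assert (a sketch; the prefix string is taken from the test data above and suitable credentials are assumed):

from pydantic_ai.models import infer_model
from pydantic_ai.models.gemini import GeminiModel

model = infer_model('google-vertex:gemini-1.5-flash')
assert isinstance(model, GeminiModel)  # was VertexAIModel in 0.0.31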
{pydantic_ai-0.0.31 → pydantic_ai-0.0.33}/tests/models/test_model_names.py

@@ -1,7 +1,8 @@
  from collections.abc import Iterator
- from typing import Any, get_args
+ from typing import Any

  import pytest
+ from typing_extensions import get_args

  from pydantic_ai.models import KnownModelName

@@ -40,10 +41,18 @@ def test_known_model_names():
      openai_names = [f'openai:{n}' for n in get_model_names(OpenAIModelName)] + [
          n for n in get_model_names(OpenAIModelName) if n.startswith('o1') or n.startswith('gpt') or n.startswith('o3')
      ]
+     deepseek_names = ['deepseek:deepseek-chat', 'deepseek:deepseek-reasoner']
      extra_names = ['test']

      generated_names = sorted(
-         anthropic_names + cohere_names + google_names + groq_names + mistral_names + openai_names + extra_names
+         anthropic_names
+         + cohere_names
+         + google_names
+         + groq_names
+         + mistral_names
+         + openai_names
+         + deepseek_names
+         + extra_names
      )

      known_model_names = sorted(get_args(KnownModelName))
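
The get_args import moves from typing to typing_extensions so that get_args(KnownModelName) behaves consistently across all supported Python versions (>=3.9), and the two DeepSeek names join the expected list. A sketch of the enumeration the test performs:

from typing_extensions import get_args

from pydantic_ai.models import KnownModelName

names = sorted(get_args(KnownModelName))
assert 'deepseek:deepseek-chat' in names      # new in 0.0.33
assert 'deepseek:deepseek-reasoner' in names  # new in 0.0.33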