pydantic-ai 0.0.38__tar.gz → 0.0.39__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Note: this release of pydantic-ai has been flagged as potentially problematic.
Files changed (90)
  1. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/PKG-INFO +3 -8
  2. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/README.md +0 -5
  3. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/pyproject.toml +3 -3
  4. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_groq.py +33 -16
  5. pydantic_ai-0.0.39/tests/providers/test_groq.py +66 -0
  6. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/providers/test_provider_names.py +2 -0
  7. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_live.py +2 -1
  8. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/.gitignore +0 -0
  9. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/LICENSE +0 -0
  10. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/Makefile +0 -0
  11. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/__init__.py +0 -0
  12. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/assets/dummy.pdf +0 -0
  13. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/assets/kiwi.png +0 -0
  14. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/assets/marcelo.mp3 +0 -0
  15. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/conftest.py +0 -0
  16. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/example_modules/README.md +0 -0
  17. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/example_modules/bank_database.py +0 -0
  18. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/example_modules/fake_database.py +0 -0
  19. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/example_modules/weather_service.py +0 -0
  20. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/graph/__init__.py +0 -0
  21. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/graph/test_graph.py +0 -0
  22. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/graph/test_history.py +0 -0
  23. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/graph/test_mermaid.py +0 -0
  24. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/graph/test_state.py +0 -0
  25. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/graph/test_utils.py +0 -0
  26. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/import_examples.py +0 -0
  27. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/json_body_serializer.py +0 -0
  28. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/__init__.py +0 -0
  29. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
  30. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
  31. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
  32. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
  33. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
  34. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
  35. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
  36. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
  37. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
  38. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
  39. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
  40. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
  41. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
  42. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
  43. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
  44. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
  45. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
  46. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
  47. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
  48. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
  49. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
  50. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
  51. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
  52. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
  53. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
  54. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
  55. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
  56. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
  57. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/mock_async_stream.py +0 -0
  58. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_anthropic.py +0 -0
  59. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_bedrock.py +0 -0
  60. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_cohere.py +0 -0
  61. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_fallback.py +0 -0
  62. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_gemini.py +0 -0
  63. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_instrumented.py +0 -0
  64. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_mistral.py +0 -0
  65. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_model.py +0 -0
  66. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_model_function.py +0 -0
  67. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_model_names.py +0 -0
  68. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_model_test.py +0 -0
  69. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_openai.py +0 -0
  70. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_vertexai.py +0 -0
  71. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/providers/__init__.py +0 -0
  72. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/providers/test_bedrock.py +0 -0
  73. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/providers/test_deepseek.py +0 -0
  74. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/providers/test_google_gla.py +0 -0
  75. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/providers/test_google_vertex.py +0 -0
  76. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_agent.py +0 -0
  77. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_cli.py +0 -0
  78. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_deps.py +0 -0
  79. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_examples.py +0 -0
  80. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_format_as_xml.py +0 -0
  81. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_json_body_serializer.py +0 -0
  82. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_logfire.py +0 -0
  83. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_messages.py +0 -0
  84. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_parts_manager.py +0 -0
  85. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_streaming.py +0 -0
  86. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_tools.py +0 -0
  87. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_usage_limits.py +0 -0
  88. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_utils.py +0 -0
  89. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/typed_agent.py +0 -0
  90. {pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/typed_graph.py +0 -0

{pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.0.38
+Version: 0.0.39
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,groq,mistral,openai,vertexai]==0.0.38
+Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,groq,mistral,openai,vertexai]==0.0.39
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.0.38; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.0.39; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=2.3; extra == 'logfire'
 Description-Content-Type: text/markdown
@@ -98,11 +98,6 @@ Provides the ability to [stream](https://ai.pydantic.dev/results/#streamed-resul
 * __Graph Support__
 [Pydantic Graph](https://ai.pydantic.dev/graph) provides a powerful way to define graphs using typing hints, this is useful in complex applications where standard control flow can degrade to spaghetti code.
 
-## In Beta!
-
-PydanticAI is in early beta, the API is still subject to change and there's a lot more to do.
-[Feedback](https://github.com/pydantic/pydantic-ai/issues) is very welcome!
-
 ## Hello World Example
 
 Here's a minimal example of PydanticAI:

{pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/README.md
@@ -61,11 +61,6 @@ Provides the ability to [stream](https://ai.pydantic.dev/results/#streamed-resul
 * __Graph Support__
 [Pydantic Graph](https://ai.pydantic.dev/graph) provides a powerful way to define graphs using typing hints, this is useful in complex applications where standard control flow can degrade to spaghetti code.
 
-## In Beta!
-
-PydanticAI is in early beta, the API is still subject to change and there's a lot more to do.
-[Feedback](https://github.com/pydantic/pydantic-ai/issues) is very welcome!
-
 ## Hello World Example
 
 Here's a minimal example of PydanticAI:

{pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai"
-version = "0.0.38"
+version = "0.0.39"
 description = "Agent Framework / shim to use Pydantic with LLMs"
 authors = [
     { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
@@ -36,7 +36,7 @@ classifiers = [
 ]
 requires-python = ">=3.9"
 dependencies = [
-    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli]==0.0.38",
+    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli]==0.0.39",
 ]
 
 [project.urls]
@@ -46,7 +46,7 @@ Documentation = "https://ai.pydantic.dev"
 Changelog = "https://github.com/pydantic/pydantic-ai/releases"
 
 [project.optional-dependencies]
-examples = ["pydantic-ai-examples==0.0.38"]
+examples = ["pydantic-ai-examples==0.0.39"]
 logfire = ["logfire>=2.3"]
 
 [tool.uv.sources]

{pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/models/test_groq.py
@@ -1,11 +1,13 @@
 from __future__ import annotations as _annotations
 
 import json
+import os
 from collections.abc import Sequence
 from dataclasses import dataclass
 from datetime import datetime, timezone
 from functools import cached_property
 from typing import Any, Literal, Union, cast
+from unittest.mock import patch
 
 import httpx
 import pytest
@@ -25,7 +27,7 @@ from pydantic_ai.messages import (
     ToolReturnPart,
     UserPromptPart,
 )
-from pydantic_ai.result import Usage
+from pydantic_ai.usage import Usage
 
 from ..conftest import IsNow, raise_if_exception, try_import
 from .mock_async_stream import MockAsyncStream
@@ -45,6 +47,7 @@ with try_import() as imports_successful:
     from groq.types.completion_usage import CompletionUsage
 
     from pydantic_ai.models.groq import GroqModel
+    from pydantic_ai.providers.groq import GroqProvider
 
     # note: we use Union here so that casting works with Python 3.9
     MockChatCompletion = Union[chat.ChatCompletion, Exception]
@@ -57,7 +60,7 @@ pytestmark = [
 
 
 def test_init():
-    m = GroqModel('llama-3.3-70b-versatile', api_key='foobar')
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(api_key='foobar'))
     assert m.client.api_key == 'foobar'
     assert m.model_name == 'llama-3.3-70b-versatile'
     assert m.system == 'groq'
@@ -121,7 +124,7 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage
 async def test_request_simple_success(allow_model_requests: None):
     c = completion_message(ChatCompletionMessage(content='world', role='assistant'))
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m)
 
     result = await agent.run('hello')
@@ -158,7 +161,7 @@ async def test_request_simple_usage(allow_model_requests: None):
         usage=CompletionUsage(completion_tokens=1, prompt_tokens=2, total_tokens=3),
     )
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m)
 
     result = await agent.run('Hello')
@@ -180,7 +183,7 @@ async def test_request_structured_response(allow_model_requests: None):
         )
     )
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m, result_type=list[int])
 
     result = await agent.run('Hello')
@@ -254,7 +257,7 @@ async def test_request_tool_call(allow_model_requests: None):
         completion_message(ChatCompletionMessage(content='final response', role='assistant')),
     ]
     mock_client = MockGroq.create_mock(responses)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m, system_prompt='this is the system prompt')
 
     @agent.tool_plain
@@ -349,7 +352,7 @@ def text_chunk(text: str, finish_reason: FinishReason | None = None) -> chat.Cha
 async def test_stream_text(allow_model_requests: None):
     stream = text_chunk('hello '), text_chunk('world'), chunk([])
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m)
 
     async with agent.run_stream('') as result:
@@ -361,7 +364,7 @@ async def test_stream_text(allow_model_requests: None):
 async def test_stream_text_finish_reason(allow_model_requests: None):
     stream = text_chunk('hello '), text_chunk('world'), text_chunk('.', finish_reason='stop')
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m)
 
     async with agent.run_stream('') as result:
@@ -408,7 +411,7 @@ async def test_stream_structured(allow_model_requests: None):
         chunk([]),
     )
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m, result_type=MyTypedDict)
 
     async with agent.run_stream('') as result:
@@ -459,7 +462,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None):
         struc_chunk(None, None, finish_reason='stop'),
     )
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m, result_type=MyTypedDict)
 
     async with agent.run_stream('') as result:
@@ -479,7 +482,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None):
 async def test_no_content(allow_model_requests: None):
     stream = chunk([ChoiceDelta()]), chunk([ChoiceDelta()])
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m, result_type=MyTypedDict)
 
     with pytest.raises(UnexpectedModelBehavior, match='Received empty model response'):
@@ -490,7 +493,7 @@ async def test_no_content(allow_model_requests: None):
 async def test_no_delta(allow_model_requests: None):
     stream = chunk([]), text_chunk('hello '), text_chunk('world')
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m)
 
     async with agent.run_stream('') as result:
@@ -501,7 +504,7 @@ async def test_no_delta(allow_model_requests: None):
 
 @pytest.mark.vcr()
 async def test_image_url_input(allow_model_requests: None, groq_api_key: str):
-    m = GroqModel('llama-3.2-11b-vision-preview', api_key=groq_api_key)
+    m = GroqModel('llama-3.2-11b-vision-preview', provider=GroqProvider(api_key=groq_api_key))
     agent = Agent(m)
 
     result = await agent.run(
@@ -530,7 +533,7 @@ Potatoes are a versatile food that can be prepared in many different ways, such
 async def test_audio_as_binary_content_input(allow_model_requests: None, media_type: str):
     c = completion_message(ChatCompletionMessage(content='world', role='assistant'))
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m)
 
     base64_content = b'//uQZ'
@@ -543,7 +546,7 @@ async def test_audio_as_binary_content_input(allow_model_requests: None, media_t
 async def test_image_as_binary_content_input(
     allow_model_requests: None, groq_api_key: str, image_content: BinaryContent
 ) -> None:
-    m = GroqModel('llama-3.2-11b-vision-preview', api_key=groq_api_key)
+    m = GroqModel('llama-3.2-11b-vision-preview', provider=GroqProvider(api_key=groq_api_key))
     agent = Agent(m)
 
     result = await agent.run(['What is the name of this fruit?', image_content])
@@ -560,10 +563,24 @@ def test_model_status_error(allow_model_requests: None) -> None:
             body={'error': 'test error'},
         )
     )
-    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client))
     agent = Agent(m)
     with pytest.raises(ModelHTTPError) as exc_info:
         agent.run_sync('hello')
     assert str(exc_info.value) == snapshot(
         "status_code: 500, model_name: llama-3.3-70b-versatile, body: {'error': 'test error'}"
     )
+
+
+async def test_init_with_provider():
+    provider = GroqProvider(api_key='api-key')
+    model = GroqModel('llama3-8b-8192', provider=provider)
+    assert model.model_name == 'llama3-8b-8192'
+    assert model.client == provider.client
+
+
+async def test_init_with_provider_string():
+    with patch.dict(os.environ, {'GROQ_API_KEY': 'env-api-key'}, clear=False):
+        model = GroqModel('llama3-8b-8192', provider='groq')
+        assert model.model_name == 'llama3-8b-8192'
+        assert model.client is not None
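
Taken together, the test changes above show the shape of the new API in 0.0.39: GroqModel no longer takes api_key, groq_client or http_client directly and is instead configured through a GroqProvider (or the provider name 'groq'), and Usage now lives in pydantic_ai.usage rather than pydantic_ai.result. A minimal sketch of the new construction, based only on the calls exercised in these tests (not on official documentation):

    import os

    from pydantic_ai import Agent
    from pydantic_ai.models.groq import GroqModel
    from pydantic_ai.providers.groq import GroqProvider
    from pydantic_ai.usage import Usage  # moved here from pydantic_ai.result in this release

    # 0.0.38 style was GroqModel('llama-3.3-70b-versatile', api_key='...');
    # in 0.0.39 the configuration goes through a provider object instead.
    model = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(api_key=os.environ['GROQ_API_KEY']))

    # Alternatively, pass the provider by name; GroqProvider then reads GROQ_API_KEY itself.
    model_from_env = GroqModel('llama-3.3-70b-versatile', provider='groq')

    agent = Agent(model)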

pydantic_ai-0.0.39/tests/providers/test_groq.py (new file)
@@ -0,0 +1,66 @@
+from __future__ import annotations as _annotations
+
+import os
+import re
+from unittest.mock import patch
+
+import httpx
+import pytest
+
+from ..conftest import try_import
+
+with try_import() as imports_successful:
+    from groq import AsyncGroq
+
+    from pydantic_ai.providers.groq import GroqProvider
+
+
+pytestmark = pytest.mark.skipif(not imports_successful(), reason='groq not installed')
+
+
+def test_groq_provider():
+    provider = GroqProvider(api_key='api-key')
+    assert provider.name == 'groq'
+    assert provider.base_url == 'https://api.groq.com'
+    assert isinstance(provider.client, AsyncGroq)
+    assert provider.client.api_key == 'api-key'
+
+
+def test_groq_provider_need_api_key() -> None:
+    with patch.dict(os.environ, {}, clear=True):
+        with pytest.raises(
+            ValueError,
+            match=re.escape(
+                'Set the `GROQ_API_KEY` environment variable or pass it via `GroqProvider(api_key=...)`'
+                'to use the Groq provider.'
+            ),
+        ):
+            GroqProvider()
+
+
+def test_groq_provider_pass_http_client() -> None:
+    http_client = httpx.AsyncClient()
+    provider = GroqProvider(http_client=http_client, api_key='api-key')
+    assert provider.client._client == http_client  # type: ignore[reportPrivateUsage]
+
+
+def test_groq_provider_pass_groq_client() -> None:
+    groq_client = AsyncGroq(api_key='api-key')
+    provider = GroqProvider(groq_client=groq_client)
+    assert provider.client == groq_client
+
+
+def test_groq_provider_with_env_base_url(monkeypatch: pytest.MonkeyPatch) -> None:
+    # Test with environment variable for base_url
+    monkeypatch.setenv('GROQ_BASE_URL', 'https://custom.groq.com/v1')
+    provider = GroqProvider(api_key='api-key')
+    assert provider.base_url == 'https://custom.groq.com/v1'
+
+
+def test_infer_groq_provider():
+    with patch.dict(os.environ, {'GROQ_API_KEY': 'test-api-key'}, clear=False):
+        from pydantic_ai.providers import infer_provider
+
+        provider = infer_provider('groq')
+        assert provider.name == 'groq'
+        assert isinstance(provider, GroqProvider)
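
The new provider test module above covers the configuration surface of GroqProvider: an explicit api_key, a custom httpx.AsyncClient, a pre-built AsyncGroq client, or the GROQ_API_KEY / GROQ_BASE_URL environment variables. A short sketch of those options, following the same calls the tests make (the timeout value is only an illustration, not something the diff specifies):

    import httpx
    from groq import AsyncGroq

    from pydantic_ai.providers import infer_provider
    from pydantic_ai.providers.groq import GroqProvider

    # Explicit API key; the default base URL is https://api.groq.com
    # (GROQ_BASE_URL overrides it, GROQ_API_KEY is used when api_key is omitted).
    provider = GroqProvider(api_key='api-key')

    # Reuse an existing httpx client, e.g. one with custom timeouts or proxies.
    http_client = httpx.AsyncClient(timeout=30)
    provider_with_http = GroqProvider(api_key='api-key', http_client=http_client)

    # Or hand over a fully configured AsyncGroq client.
    provider_with_groq = GroqProvider(groq_client=AsyncGroq(api_key='api-key'))

    # The provider can also be inferred by name:
    # provider = infer_provider('groq')  # requires GROQ_API_KEY to be set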

{pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/providers/test_provider_names.py
@@ -14,6 +14,7 @@ with try_import() as imports_successful:
     from pydantic_ai.providers.deepseek import DeepSeekProvider
     from pydantic_ai.providers.google_gla import GoogleGLAProvider
     from pydantic_ai.providers.google_vertex import GoogleVertexProvider
+    from pydantic_ai.providers.groq import GroqProvider
     from pydantic_ai.providers.openai import OpenAIProvider
 
     test_infer_provider_params = [
@@ -21,6 +22,7 @@ with try_import() as imports_successful:
         ('openai', OpenAIProvider, None),
         ('google-vertex', GoogleVertexProvider, None),
         ('google-gla', GoogleGLAProvider, 'GEMINI_API_KEY'),
+        ('groq', GroqProvider, 'GROQ_API_KEY'),
     ]
 
 if not imports_successful():

{pydantic_ai-0.0.38 → pydantic_ai-0.0.39}/tests/test_live.py
@@ -50,8 +50,9 @@ def vertexai(http_client: httpx.AsyncClient, tmp_path: Path) -> Model:
 
 def groq(http_client: httpx.AsyncClient, _tmp_path: Path) -> Model:
     from pydantic_ai.models.groq import GroqModel
+    from pydantic_ai.providers.groq import GroqProvider
 
-    return GroqModel('llama-3.3-70b-versatile', http_client=http_client)
+    return GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(http_client=http_client))
 
 
 def anthropic(http_client: httpx.AsyncClient, _tmp_path: Path) -> Model:
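
The live-test change above shows the same pattern end to end: a shared httpx.AsyncClient is now injected through GroqProvider rather than passed to the model. A hedged sketch of how that wiring might look in application code, assuming only what the diffed tests demonstrate (GROQ_API_KEY must be set in the environment for the provider to authenticate):

    import asyncio

    import httpx

    from pydantic_ai import Agent
    from pydantic_ai.models.groq import GroqModel
    from pydantic_ai.providers.groq import GroqProvider


    async def main() -> None:
        # Share one httpx client between your app and the Groq provider,
        # mirroring how tests/test_live.py now constructs the model.
        async with httpx.AsyncClient() as http_client:
            model = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(http_client=http_client))
            agent = Agent(model)
            result = await agent.run('hello')
            print(result)


    # asyncio.run(main())  # makes a real API call; requires GROQ_API_KEY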