pydantic-ai 0.0.40__tar.gz → 0.0.42__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release has been flagged as potentially problematic.

Files changed (99):
  1. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/.gitignore +1 -1
  2. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/Makefile +9 -0
  3. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/PKG-INFO +3 -3
  4. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/pyproject.toml +17 -7
  5. pydantic_ai-0.0.42/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml +205 -0
  6. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/test_graph.py +1 -1
  7. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/test_persistence.py +1 -1
  8. pydantic_ai-0.0.42/tests/mcp_server.py +19 -0
  9. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_anthropic.py +33 -16
  10. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_fallback.py +76 -0
  11. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_mistral.py +33 -26
  12. pydantic_ai-0.0.42/tests/providers/test_anthropic.py +56 -0
  13. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/test_groq.py +0 -9
  14. pydantic_ai-0.0.42/tests/providers/test_mistral.py +58 -0
  15. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/test_provider_names.py +5 -1
  16. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_examples.py +25 -3
  17. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_live.py +4 -2
  18. pydantic_ai-0.0.42/tests/test_mcp.py +93 -0
  19. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_tools.py +61 -16
  20. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/LICENSE +0 -0
  21. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/README.md +0 -0
  22. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/__init__.py +0 -0
  23. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/assets/dummy.pdf +0 -0
  24. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/assets/kiwi.png +0 -0
  25. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/assets/marcelo.mp3 +0 -0
  26. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/conftest.py +0 -0
  27. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/example_modules/README.md +0 -0
  28. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/example_modules/bank_database.py +0 -0
  29. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/example_modules/fake_database.py +0 -0
  30. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/example_modules/weather_service.py +0 -0
  31. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/__init__.py +0 -0
  32. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/test_file_persistence.py +0 -0
  33. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/test_mermaid.py +0 -0
  34. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/test_state.py +0 -0
  35. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/test_utils.py +0 -0
  36. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/import_examples.py +0 -0
  37. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/json_body_serializer.py +0 -0
  38. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/__init__.py +0 -0
  39. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_anthropic/test_document_binary_content_input.yaml +0 -0
  40. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
  41. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
  42. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
  43. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
  44. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
  45. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
  46. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
  47. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
  48. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
  49. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
  50. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
  51. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
  52. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
  53. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
  54. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
  55. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
  56. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
  57. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
  58. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
  59. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
  60. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
  61. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
  62. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
  63. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
  64. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
  65. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
  66. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
  67. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
  68. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/mock_async_stream.py +0 -0
  69. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_bedrock.py +0 -0
  70. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_cohere.py +0 -0
  71. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_gemini.py +0 -0
  72. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_groq.py +0 -0
  73. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_instrumented.py +0 -0
  74. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_model.py +0 -0
  75. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_model_function.py +0 -0
  76. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_model_names.py +0 -0
  77. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_model_test.py +0 -0
  78. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_openai.py +0 -0
  79. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_vertexai.py +0 -0
  80. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/__init__.py +0 -0
  81. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/cassettes/test_azure/test_azure_provider_call.yaml +0 -0
  82. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/test_azure.py +0 -0
  83. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/test_bedrock.py +0 -0
  84. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/test_deepseek.py +0 -0
  85. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/test_google_gla.py +0 -0
  86. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/providers/test_google_vertex.py +0 -0
  87. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_agent.py +0 -0
  88. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_cli.py +0 -0
  89. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_deps.py +0 -0
  90. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_format_as_xml.py +0 -0
  91. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_json_body_serializer.py +0 -0
  92. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_logfire.py +0 -0
  93. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_messages.py +0 -0
  94. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_parts_manager.py +0 -0
  95. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_streaming.py +0 -0
  96. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_usage_limits.py +0 -0
  97. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/test_utils.py +0 -0
  98. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/typed_agent.py +0 -0
  99. {pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/typed_graph.py +0 -0
{pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/.gitignore

@@ -1,5 +1,4 @@
  site
- .python-version
  .venv
  dist
  __pycache__
@@ -16,3 +15,4 @@ examples/pydantic_ai_examples/.chat_app_messages.sqlite
  .vscode/
  /question_graph_history.json
  /docs-site/.wrangler/
+ /CLAUDE.md
{pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/Makefile

@@ -27,6 +27,10 @@ lint: ## Lint the code
  uv run ruff format --check
  uv run ruff check
  
+ .PHONY: lint-js
+ lint-js: ## Lint JS and TS code
+ cd mcp-run-python && npm run lint
+ 
  .PHONY: typecheck-pyright
  typecheck-pyright:
  @# PYRIGHT_PYTHON_IGNORE_WARNINGS avoids the overhead of making a request to github on every invocation
@@ -62,6 +66,11 @@ testcov: test ## Run tests and generate a coverage report
  @echo "building coverage html"
  @uv run coverage html
  
+ .PHONY: test-mrp
+ test-mrp: ## Build and tests of mcp-run-python
+ cd mcp-run-python && npm run prepare
+ uv run --package mcp-run-python pytest mcp-run-python -v
+ 
  .PHONY: update-examples
  update-examples: ## Update documentation examples
  uv run -m pytest --update-examples tests/test_examples.py
{pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai
- Version: 0.0.40
+ Version: 0.0.42
  Summary: Agent Framework / shim to use Pydantic with LLMs
  Project-URL: Homepage, https://ai.pydantic.dev
  Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.9
- Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,groq,mistral,openai,vertexai]==0.0.40
+ Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,groq,mcp,mistral,openai,vertexai]==0.0.42
  Provides-Extra: examples
- Requires-Dist: pydantic-ai-examples==0.0.40; extra == 'examples'
+ Requires-Dist: pydantic-ai-examples==0.0.42; extra == 'examples'
  Provides-Extra: logfire
  Requires-Dist: logfire>=2.3; extra == 'logfire'
  Description-Content-Type: text/markdown
{pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
  
  [project]
  name = "pydantic-ai"
- version = "0.0.40"
+ version = "0.0.42"
  description = "Agent Framework / shim to use Pydantic with LLMs"
  authors = [
      { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
@@ -36,7 +36,7 @@ classifiers = [
  ]
  requires-python = ">=3.9"
  dependencies = [
-     "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli]==0.0.40",
+     "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli,mcp]==0.0.42",
  ]
  
  [project.urls]
@@ -46,16 +46,17 @@ Documentation = "https://ai.pydantic.dev"
  Changelog = "https://github.com/pydantic/pydantic-ai/releases"
  
  [project.optional-dependencies]
- examples = ["pydantic-ai-examples==0.0.40"]
+ examples = ["pydantic-ai-examples==0.0.42"]
  logfire = ["logfire>=2.3"]
  
  [tool.uv.sources]
  pydantic-ai-slim = { workspace = true }
  pydantic-graph = { workspace = true }
  pydantic-ai-examples = { workspace = true }
+ mcp-run-python = { workspace = true }
  
  [tool.uv.workspace]
- members = ["pydantic_ai_slim", "pydantic_graph", "examples"]
+ members = ["pydantic_ai_slim", "pydantic_graph", "examples", "mcp-run-python"]
  
  [dependency-groups]
  # dev dependencies are defined in `pydantic-ai-slim/pyproject.toml` to allow for minimal testing
@@ -82,6 +83,7 @@ line-length = 120
  target-version = "py39"
  include = [
      "pydantic_ai_slim/**/*.py",
+     "mcp-run-python/**/*.py",
      "pydantic_graph/**/*.py",
      "examples/**/*.py",
      "tests/**/*.py",
@@ -116,18 +118,22 @@ quote-style = "single"
  "tests/**/*.py" = ["D"]
  "docs/**/*.py" = ["D"]
  "examples/**/*.py" = ["D101", "D103"]
+ "mcp-run-python/**/*.py" = ["D", "TID251"]
  
  [tool.pyright]
+ pythonVersion = "3.12"
  typeCheckingMode = "strict"
  reportMissingTypeStubs = false
  reportUnnecessaryIsInstance = false
  reportUnnecessaryTypeIgnoreComment = true
- include = ["pydantic_ai_slim", "pydantic_graph", "tests", "examples"]
+ reportMissingModuleSource = false
+ include = ["pydantic_ai_slim", "mcp-run-python", "pydantic_graph", "tests", "examples"]
  venvPath = ".venv"
  # see https://github.com/microsoft/pyright/issues/7771 - we don't want to error on decorated functions in tests
  # which are not otherwise used
  executionEnvironments = [{ root = "tests", reportUnusedFunction = false }]
- exclude = ["examples/pydantic_ai_examples/weather_agent_gradio.py"]
+ exclude = ["examples/pydantic_ai_examples/weather_agent_gradio.py", "mcp-run-python/node_modules"]
+ extraPaths = ["mcp-run-python/stubs"]
  
  [tool.mypy]
  files = "tests/typed_agent.py,tests/typed_graph.py"
@@ -139,7 +145,11 @@ xfail_strict = true
  filterwarnings = [
      "error",
      # boto3
-     "ignore::DeprecationWarning:botocore.*"
+     "ignore::DeprecationWarning:botocore.*",
+     "ignore::RuntimeWarning:pydantic_ai.mcp",
+     # uvicorn (mcp server)
+     "ignore:websockets.legacy is deprecated.*:DeprecationWarning:websockets.legacy",
+     "ignore:websockets.server.WebSocketServerProtocol is deprecated:DeprecationWarning"
  ]
  
  # https://coverage.readthedocs.io/en/latest/config.html#run
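The new filterwarnings entries above use pytest's `action:message:category:module` filter syntax. As a rough illustration only (translating pytest filter strings to the stdlib warnings API is approximate; everything beyond the strings copied from the diff is an assumption), they correspond to something like:

import warnings

# Approximate stdlib equivalent of the pytest `filterwarnings` entries added above.
warnings.simplefilter('error')  # "error": escalate any unmatched warning
warnings.filterwarnings('ignore', category=DeprecationWarning, module='botocore.*')
warnings.filterwarnings('ignore', category=RuntimeWarning, module='pydantic_ai.mcp')
warnings.filterwarnings(
    'ignore',
    message='websockets.legacy is deprecated.*',
    category=DeprecationWarning,
    module='websockets.legacy',
)
warnings.filterwarnings(
    'ignore',
    message='websockets.server.WebSocketServerProtocol is deprecated',
    category=DeprecationWarning,
)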
pydantic_ai-0.0.42/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml

@@ -0,0 +1,205 @@
+ interactions:
+ - request:
+     headers:
+       accept:
+       - application/json
+       accept-encoding:
+       - gzip, deflate
+       connection:
+       - keep-alive
+       content-length:
+       - '530'
+       content-type:
+       - application/json
+       host:
+       - api.openai.com
+     method: POST
+     parsed_body:
+       messages:
+       - content: What is 0 degrees Celsius in Fahrenheit?
+         role: user
+       model: gpt-4o
+       n: 1
+       stream: false
+       tool_choice: auto
+       tools:
+       - function:
+           description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n
+             \ Temperature in Fahrenheit\n "
+           name: celsius_to_fahrenheit
+           parameters:
+             properties:
+               celsius:
+                 title: Celsius
+                 type: number
+             required:
+             - celsius
+             title: celsius_to_fahrenheitArguments
+             type: object
+         type: function
+     uri: https://api.openai.com/v1/chat/completions
+   response:
+     headers:
+       access-control-expose-headers:
+       - X-Request-ID
+       alt-svc:
+       - h3=":443"; ma=86400
+       connection:
+       - keep-alive
+       content-length:
+       - '1085'
+       content-type:
+       - application/json
+       openai-organization:
+       - pydantic-28gund
+       openai-processing-ms:
+       - '594'
+       openai-version:
+       - '2020-10-01'
+       strict-transport-security:
+       - max-age=31536000; includeSubDomains; preload
+       transfer-encoding:
+       - chunked
+     parsed_body:
+       choices:
+       - finish_reason: tool_calls
+         index: 0
+         logprobs: null
+         message:
+           annotations: []
+           content: null
+           refusal: null
+           role: assistant
+           tool_calls:
+           - function:
+               arguments: '{"celsius":0}'
+               name: celsius_to_fahrenheit
+             id: call_UNesABTXfwIkYdh3HzXWw2wD
+             type: function
+       created: 1741776872
+       id: chatcmpl-BAE1IwTZc7FHM4TkNeBoPylR7rtCT
+       model: gpt-4o-2024-08-06
+       object: chat.completion
+       service_tier: default
+       system_fingerprint: fp_eb9dce56a8
+       usage:
+         completion_tokens: 19
+         completion_tokens_details:
+           accepted_prediction_tokens: 0
+           audio_tokens: 0
+           reasoning_tokens: 0
+           rejected_prediction_tokens: 0
+         prompt_tokens: 82
+         prompt_tokens_details:
+           audio_tokens: 0
+           cached_tokens: 0
+         total_tokens: 101
+     status:
+       code: 200
+       message: OK
+ - request:
+     headers:
+       accept:
+       - application/json
+       accept-encoding:
+       - gzip, deflate
+       connection:
+       - keep-alive
+       content-length:
+       - '879'
+       content-type:
+       - application/json
+       cookie:
+       - __cf_bm=GeO8TCYhlEUIV63eLxM4nKUU2OLlG.f8tMvM9shFTc8-1741776873-1.0.1.1-zxkkWGCAPhJIA05Uwt3Ii3DCg9da6owy45bo_yaZ1YmsoihITJCgZzpA6H4eL0xzFRDWrWkEIQYaFEXLYcrLePwDMsgwNUJbEf6sg1vm2YQ;
+         _cfuvid=AI06nwzbBcwVRHXv_BRehX1K7p9oe1qUXFkzXBWEUW0-1741776873043-0.0.1.1-604800000
+       host:
+       - api.openai.com
+     method: POST
+     parsed_body:
+       messages:
+       - content: What is 0 degrees Celsius in Fahrenheit?
+         role: user
+       - role: assistant
+         tool_calls:
+         - function:
+             arguments: '{"celsius":0}'
+             name: celsius_to_fahrenheit
+           id: call_UNesABTXfwIkYdh3HzXWw2wD
+           type: function
+       - content: '{"meta":null,"content":[{"type":"text","text":"32.0","annotations":null}],"isError":false}'
+         role: tool
+         tool_call_id: call_UNesABTXfwIkYdh3HzXWw2wD
+       model: gpt-4o
+       n: 1
+       stream: false
+       tool_choice: auto
+       tools:
+       - function:
+           description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n
+             \ Temperature in Fahrenheit\n "
+           name: celsius_to_fahrenheit
+           parameters:
+             properties:
+               celsius:
+                 title: Celsius
+                 type: number
+             required:
+             - celsius
+             title: celsius_to_fahrenheitArguments
+             type: object
+         type: function
+     uri: https://api.openai.com/v1/chat/completions
+   response:
+     headers:
+       access-control-expose-headers:
+       - X-Request-ID
+       alt-svc:
+       - h3=":443"; ma=86400
+       connection:
+       - keep-alive
+       content-length:
+       - '849'
+       content-type:
+       - application/json
+       openai-organization:
+       - pydantic-28gund
+       openai-processing-ms:
+       - '415'
+       openai-version:
+       - '2020-10-01'
+       strict-transport-security:
+       - max-age=31536000; includeSubDomains; preload
+       transfer-encoding:
+       - chunked
+     parsed_body:
+       choices:
+       - finish_reason: stop
+         index: 0
+         logprobs: null
+         message:
+           annotations: []
+           content: 0 degrees Celsius is 32.0 degrees Fahrenheit.
+           refusal: null
+           role: assistant
+       created: 1741776873
+       id: chatcmpl-BAE1Jy3AN974xW1pziTxd6wrxliCE
+       model: gpt-4o-2024-08-06
+       object: chat.completion
+       service_tier: default
+       system_fingerprint: fp_eb9dce56a8
+       usage:
+         completion_tokens: 13
+         completion_tokens_details:
+           accepted_prediction_tokens: 0
+           audio_tokens: 0
+           reasoning_tokens: 0
+           rejected_prediction_tokens: 0
+         prompt_tokens: 139
+         prompt_tokens_details:
+           audio_tokens: 0
+           cached_tokens: 0
+         total_tokens: 152
+     status:
+       code: 200
+       message: OK
+ version: 1
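The cassette records the standard two-step OpenAI tool-calling exchange behind the new stdio-MCP test: the model first returns a `celsius_to_fahrenheit` tool call for `{"celsius": 0}`, the MCP tool result (`32.0`) is posted back as a `tool` message, and the model then answers in plain text. For comparison, a minimal sketch of the same round trip with a local tool registered directly on an agent, using the `@agent.tool_plain` decorator that appears elsewhere in this diff (model name and printed output are illustrative):

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')


@agent.tool_plain
def celsius_to_fahrenheit(celsius: float) -> float:
    """Convert Celsius to Fahrenheit."""
    return (celsius * 9 / 5) + 32


result = agent.run_sync('What is 0 degrees Celsius in Fahrenheit?')
print(result.data)  # e.g. '0 degrees Celsius is 32.0 degrees Fahrenheit.'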
{pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/test_graph.py

@@ -393,7 +393,7 @@ async def test_iter_next_error(mock_snapshot_id: object):
  
      assert isinstance(n, BaseNode)
      n = await run.next()
-     assert n == snapshot(End(None))
+     assert n == snapshot(End(data=None))
  
      with pytest.raises(TypeError, match=r'`next` must be called with a `BaseNode` instance, got End\(data=None\).'):
          await run.next()
{pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/graph/test_persistence.py

@@ -287,7 +287,7 @@ async def test_rerun_node(mock_snapshot_id: object):
      node = Foo()
      async with graph.iter(node, persistence=sp) as run:
          end = await run.next()
-         assert end == snapshot(End(123))
+         assert end == snapshot(End(data=123))
  
      msg = "Incorrect snapshot status 'success', must be 'created' or 'pending'."
      with pytest.raises(GraphNodeStatusError, match=msg):
pydantic_ai-0.0.42/tests/mcp_server.py

@@ -0,0 +1,19 @@
+ from mcp.server.fastmcp import FastMCP
+ 
+ mcp = FastMCP('PydanticAI MCP Server')
+ 
+ 
+ @mcp.tool()
+ async def celsius_to_fahrenheit(celsius: float) -> float:
+     """Convert Celsius to Fahrenheit.
+ 
+     Args:
+         celsius: Temperature in Celsius
+ 
+     Returns:
+         Temperature in Fahrenheit
+     """
+     return (celsius * 9 / 5) + 32
+ 
+ 
+ mcp.run()
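tests/mcp_server.py is the stdio MCP server exercised by the new tests/test_mcp.py and the cassette above. As a rough sketch of driving it directly with the MCP Python SDK's stdio client (the same `mcp` package that provides FastMCP; the command, script path, and exact client calls are assumptions based on the SDK, not anything shown in this diff):

import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

# Spawn the test server as a subprocess speaking MCP over stdio (path illustrative).
params = StdioServerParameters(command='python', args=['tests/mcp_server.py'])


async def main() -> None:
    async with stdio_client(params) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            result = await session.call_tool('celsius_to_fahrenheit', {'celsius': 0})
            print(result.content[0].text)  # expected: '32.0', matching the cassette above


asyncio.run(main())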
{pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_anthropic.py

@@ -7,6 +7,7 @@ from dataclasses import dataclass, field
  from datetime import timezone
  from functools import cached_property
  from typing import Any, TypeVar, Union, cast
+ from unittest.mock import patch
  
  import httpx
  import pytest
@@ -53,6 +54,7 @@ with try_import() as imports_successful:
      from anthropic.types.raw_message_delta_event import Delta
  
      from pydantic_ai.models.anthropic import AnthropicModel, AnthropicModelSettings
+     from pydantic_ai.providers.anthropic import AnthropicProvider
  
      # note: we use Union here so that casting works with Python 3.9
      MockAnthropicMessage = Union[AnthropicMessage, Exception]
@@ -68,7 +70,7 @@ T = TypeVar('T')
  
  
  def test_init():
-     m = AnthropicModel('claude-3-5-haiku-latest', api_key='foobar')
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(api_key='foobar'))
      assert m.client.api_key == 'foobar'
      assert m.model_name == 'claude-3-5-haiku-latest'
      assert m.system == 'anthropic'
@@ -81,6 +83,7 @@ class MockAnthropic:
      stream: Sequence[MockRawMessageStreamEvent] | Sequence[Sequence[MockRawMessageStreamEvent]] | None = None
      index = 0
      chat_completion_kwargs: list[dict[str, Any]] = field(default_factory=list)
+     base_url: str | None = None
  
      @cached_property
      def messages(self) -> Any:
@@ -134,7 +137,7 @@ def completion_message(content: list[ContentBlock], usage: AnthropicUsage) -> An
  async def test_sync_request_text_response(allow_model_requests: None):
      c = completion_message([TextBlock(text='world', type='text')], AnthropicUsage(input_tokens=5, output_tokens=10))
      mock_client = MockAnthropic.create_mock(c)
-     m = AnthropicModel('claude-3-5-haiku-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m)
  
      result = await agent.run('hello')
@@ -171,7 +174,7 @@ async def test_async_request_text_response(allow_model_requests: None):
          usage=AnthropicUsage(input_tokens=3, output_tokens=5),
      )
      mock_client = MockAnthropic.create_mock(c)
-     m = AnthropicModel('claude-3-5-haiku-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m)
  
      result = await agent.run('hello')
@@ -185,7 +188,7 @@ async def test_request_structured_response(allow_model_requests: None):
          usage=AnthropicUsage(input_tokens=3, output_tokens=5),
      )
      mock_client = MockAnthropic.create_mock(c)
-     m = AnthropicModel('claude-3-5-haiku-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m, result_type=list[int])
  
      result = await agent.run('hello')
@@ -235,7 +238,7 @@ async def test_request_tool_call(allow_model_requests: None):
      ]
  
      mock_client = MockAnthropic.create_mock(responses)
-     m = AnthropicModel('claude-3-5-haiku-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m, system_prompt='this is the system prompt')
  
      @agent.tool_plain
@@ -327,7 +330,7 @@ async def test_parallel_tool_calls(allow_model_requests: None, parallel_tool_cal
      ]
  
      mock_client = MockAnthropic.create_mock(responses)
-     m = AnthropicModel('claude-3-5-haiku-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m, model_settings=ModelSettings(parallel_tool_calls=parallel_tool_calls))
  
      @agent.tool_plain
@@ -366,7 +369,7 @@ async def test_multiple_parallel_tool_calls(allow_model_requests: None):
      # However, we do want to use the environment variable if present when rewriting VCR cassettes.
      api_key = os.environ.get('ANTHROPIC_API_KEY', 'mock-value')
      agent = Agent(
-         AnthropicModel('claude-3-5-haiku-latest', api_key=api_key),
+         AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(api_key=api_key)),
          system_prompt=system_prompt,
          tools=[retrieve_entity_info],
      )
@@ -436,7 +439,7 @@ async def test_multiple_parallel_tool_calls(allow_model_requests: None):
  async def test_anthropic_specific_metadata(allow_model_requests: None) -> None:
      c = completion_message([TextBlock(text='world', type='text')], AnthropicUsage(input_tokens=5, output_tokens=10))
      mock_client = MockAnthropic.create_mock(c)
-     m = AnthropicModel('claude-3-5-haiku-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m)
  
      result = await agent.run('hello', model_settings=AnthropicModelSettings(anthropic_metadata={'user_id': '123'}))
@@ -525,7 +528,7 @@ async def test_stream_structured(allow_model_requests: None):
      ]
  
      mock_client = MockAnthropic.create_stream_mock([stream, done_stream])
-     m = AnthropicModel('claude-3-5-haiku-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m)
  
      tool_called = False
@@ -555,7 +558,7 @@ async def test_stream_structured(allow_model_requests: None):
  
  @pytest.mark.vcr()
  async def test_image_url_input(allow_model_requests: None, anthropic_api_key: str):
-     m = AnthropicModel('claude-3-5-haiku-latest', api_key=anthropic_api_key)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(api_key=anthropic_api_key))
      agent = Agent(m)
  
      result = await agent.run(
@@ -573,7 +576,7 @@ Potatoes are root vegetables that are staple foods in many cuisines around the w
  
  @pytest.mark.vcr()
  async def test_image_url_input_invalid_mime_type(allow_model_requests: None, anthropic_api_key: str):
-     m = AnthropicModel('claude-3-5-haiku-latest', api_key=anthropic_api_key)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(api_key=anthropic_api_key))
      agent = Agent(m)
  
      result = await agent.run(
@@ -593,7 +596,7 @@ async def test_image_url_input_invalid_mime_type(allow_model_requests: None, ant
  async def test_audio_as_binary_content_input(allow_model_requests: None, media_type: str):
      c = completion_message([TextBlock(text='world', type='text')], AnthropicUsage(input_tokens=5, output_tokens=10))
      mock_client = MockAnthropic.create_mock(c)
-     m = AnthropicModel('claude-3-5-haiku-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m)
  
      base64_content = b'//uQZ'
@@ -610,7 +613,7 @@ def test_model_status_error(allow_model_requests: None) -> None:
              body={'error': 'test error'},
          )
      )
-     m = AnthropicModel('claude-3-5-sonnet-latest', anthropic_client=mock_client)
+     m = AnthropicModel('claude-3-5-sonnet-latest', provider=AnthropicProvider(anthropic_client=mock_client))
      agent = Agent(m)
      with pytest.raises(ModelHTTPError) as exc_info:
          agent.run_sync('hello')
@@ -623,7 +626,7 @@ def test_model_status_error(allow_model_requests: None) -> None:
  async def test_document_binary_content_input(
      allow_model_requests: None, anthropic_api_key: str, document_content: BinaryContent
  ):
-     m = AnthropicModel('claude-3-5-sonnet-latest', api_key=anthropic_api_key)
+     m = AnthropicModel('claude-3-5-sonnet-latest', provider=AnthropicProvider(api_key=anthropic_api_key))
      agent = Agent(m)
  
      result = await agent.run(['What is the main content on this document?', document_content])
@@ -634,7 +637,7 @@ async def test_document_binary_content_input(
  
  @pytest.mark.vcr()
  async def test_document_url_input(allow_model_requests: None, anthropic_api_key: str):
-     m = AnthropicModel('claude-3-5-sonnet-latest', api_key=anthropic_api_key)
+     m = AnthropicModel('claude-3-5-sonnet-latest', provider=AnthropicProvider(api_key=anthropic_api_key))
      agent = Agent(m)
  
      document_url = DocumentUrl(url='https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf')
@@ -647,7 +650,7 @@ async def test_document_url_input(allow_model_requests: None, anthropic_api_key:
  
  @pytest.mark.vcr()
  async def test_text_document_url_input(allow_model_requests: None, anthropic_api_key: str):
-     m = AnthropicModel('claude-3-5-sonnet-latest', api_key=anthropic_api_key)
+     m = AnthropicModel('claude-3-5-sonnet-latest', provider=AnthropicProvider(api_key=anthropic_api_key))
      agent = Agent(m)
  
      text_document_url = DocumentUrl(url='https://example-files.online-convert.com/document/txt/example.txt')
@@ -668,3 +671,17 @@ This document is a TXT test file that primarily contains information about the u
  
  The document is formatted as a test file with metadata including its purpose, file type, and version. It also includes attribution information indicating the content is from Wikipedia and is licensed under Attribution-ShareAlike 4.0.\
  """)
+ 
+ 
+ def test_init_with_provider():
+     provider = AnthropicProvider(api_key='api-key')
+     model = AnthropicModel('claude-3-opus-latest', provider=provider)
+     assert model.model_name == 'claude-3-opus-latest'
+     assert model.client == provider.client
+ 
+ 
+ def test_init_with_provider_string():
+     with patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'env-api-key'}, clear=False):
+         model = AnthropicModel('claude-3-opus-latest', provider='anthropic')
+         assert model.model_name == 'claude-3-opus-latest'
+         assert model.client is not None
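The recurring change in this file is the new provider-based constructor: `AnthropicModel(..., api_key=...)` and `AnthropicModel(..., anthropic_client=...)` become `AnthropicModel(..., provider=AnthropicProvider(...))`, with `provider='anthropic'` falling back to the `ANTHROPIC_API_KEY` environment variable. A minimal sketch of the new pattern in user code, mirroring the calls above (model name and key are illustrative):

from pydantic_ai import Agent
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.providers.anthropic import AnthropicProvider

# Explicit provider with an API key (replaces the old api_key= argument).
model = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(api_key='your-api-key'))

# Or pass the provider name and let it read ANTHROPIC_API_KEY from the environment.
model = AnthropicModel('claude-3-5-haiku-latest', provider='anthropic')

agent = Agent(model)
result = agent.run_sync('hello')
print(result.data)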
{pydantic_ai-0.0.40 → pydantic_ai-0.0.42}/tests/models/test_fallback.py

@@ -185,6 +185,82 @@ def test_first_failed_instrumented(capfire: CaptureLogfire) -> None:
      )
  
  
+ @pytest.mark.skipif(not logfire_imports_successful(), reason='logfire not installed')
+ async def test_first_failed_instrumented_stream(capfire: CaptureLogfire) -> None:
+     fallback_model = FallbackModel(failure_model_stream, success_model_stream)
+     agent = Agent(model=fallback_model, instrument=True)
+     async with agent.run_stream('input') as result:
+         assert [c async for c, _is_last in result.stream_structured(debounce_by=None)] == snapshot(
+             [
+                 ModelResponse(
+                     parts=[TextPart(content='hello ')],
+                     model_name='function::success_response_stream',
+                     timestamp=IsNow(tz=timezone.utc),
+                 ),
+                 ModelResponse(
+                     parts=[TextPart(content='hello world')],
+                     model_name='function::success_response_stream',
+                     timestamp=IsNow(tz=timezone.utc),
+                 ),
+                 ModelResponse(
+                     parts=[TextPart(content='hello world')],
+                     model_name='function::success_response_stream',
+                     timestamp=IsNow(tz=timezone.utc),
+                 ),
+             ]
+         )
+         assert result.is_complete
+ 
+     assert capfire.exporter.exported_spans_as_dict() == snapshot(
+         [
+             {
+                 'name': 'preparing model request params',
+                 'context': {'trace_id': 1, 'span_id': 3, 'is_remote': False},
+                 'parent': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
+                 'start_time': 2000000000,
+                 'end_time': 3000000000,
+                 'attributes': {
+                     'run_step': 1,
+                     'logfire.span_type': 'span',
+                     'logfire.msg': 'preparing model request params',
+                 },
+             },
+             {
+                 'name': 'chat function::success_response_stream',
+                 'context': {'trace_id': 1, 'span_id': 5, 'is_remote': False},
+                 'parent': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
+                 'start_time': 4000000000,
+                 'end_time': 5000000000,
+                 'attributes': {
+                     'gen_ai.operation.name': 'chat',
+                     'logfire.span_type': 'span',
+                     'logfire.msg': 'chat fallback:function::failure_response_stream,function::success_response_stream',
+                     'gen_ai.system': 'function',
+                     'gen_ai.request.model': 'function::success_response_stream',
+                     'gen_ai.usage.input_tokens': 50,
+                     'gen_ai.usage.output_tokens': 2,
+                     'gen_ai.response.model': 'function::success_response_stream',
+                     'events': '[{"content": "input", "role": "user", "gen_ai.system": "function", "gen_ai.message.index": 0, "event.name": "gen_ai.user.message"}, {"index": 0, "message": {"role": "assistant", "content": "hello world"}, "gen_ai.system": "function", "event.name": "gen_ai.choice"}]',
+                     'logfire.json_schema': '{"type": "object", "properties": {"events": {"type": "array"}}}',
+                 },
+             },
+             {
+                 'name': 'agent run',
+                 'context': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
+                 'parent': None,
+                 'start_time': 1000000000,
+                 'end_time': 6000000000,
+                 'attributes': {
+                     'model_name': 'fallback:function::failure_response_stream,function::success_response_stream',
+                     'agent_name': 'agent',
+                     'logfire.msg': 'agent run',
+                     'logfire.span_type': 'span',
+                 },
+             },
+         ]
+     )
+ 
+ 
  def test_all_failed() -> None:
      fallback_model = FallbackModel(failure_model, failure_model)
      agent = Agent(model=fallback_model)
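The new test covers FallbackModel in streaming mode: the first model's failure is swallowed, `run_stream()` transparently continues with the next model, and the exported spans record the model that actually answered. A rough sketch of the same pattern with real providers (the `FallbackModel` constructor and `instrument=True` flag mirror the test above; the concrete model classes, model names, and the `stream_text()` call are illustrative assumptions):

import asyncio

from pydantic_ai import Agent
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.models.fallback import FallbackModel
from pydantic_ai.models.openai import OpenAIModel

# Try OpenAI first; if it raises, FallbackModel falls through to Anthropic.
fallback_model = FallbackModel(OpenAIModel('gpt-4o'), AnthropicModel('claude-3-5-haiku-latest'))
agent = Agent(model=fallback_model, instrument=True)


async def main() -> None:
    async with agent.run_stream('What is 0 degrees Celsius in Fahrenheit?') as result:
        async for chunk in result.stream_text():
            print(chunk)


asyncio.run(main())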