pydantic-ai 0.0.41__tar.gz → 0.0.43__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/.gitignore +1 -1
  2. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/Makefile +9 -0
  3. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/PKG-INFO +3 -3
  4. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/pyproject.toml +17 -7
  5. pydantic_ai-0.0.43/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml +205 -0
  6. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/graph/test_graph.py +1 -1
  7. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/graph/test_persistence.py +1 -1
  8. pydantic_ai-0.0.43/tests/mcp_server.py +19 -0
  9. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_anthropic.py +1 -1
  10. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_bedrock.py +12 -3
  11. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_cohere.py +1 -1
  12. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_gemini.py +1 -1
  13. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_groq.py +1 -1
  14. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_mistral.py +6 -6
  15. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_model_function.py +1 -1
  16. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_openai.py +1 -1
  17. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_google_vertex.py +6 -5
  18. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_agent.py +15 -10
  19. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_examples.py +25 -3
  20. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_logfire.py +2 -2
  21. pydantic_ai-0.0.43/tests/test_mcp.py +93 -0
  22. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_tools.py +61 -16
  23. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/LICENSE +0 -0
  24. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/README.md +0 -0
  25. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/__init__.py +0 -0
  26. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/assets/dummy.pdf +0 -0
  27. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/assets/kiwi.png +0 -0
  28. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/assets/marcelo.mp3 +0 -0
  29. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/conftest.py +0 -0
  30. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/example_modules/README.md +0 -0
  31. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/example_modules/bank_database.py +0 -0
  32. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/example_modules/fake_database.py +0 -0
  33. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/example_modules/weather_service.py +0 -0
  34. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/graph/__init__.py +0 -0
  35. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/graph/test_file_persistence.py +0 -0
  36. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/graph/test_mermaid.py +0 -0
  37. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/graph/test_state.py +0 -0
  38. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/graph/test_utils.py +0 -0
  39. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/import_examples.py +0 -0
  40. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/json_body_serializer.py +0 -0
  41. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/__init__.py +0 -0
  42. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_anthropic/test_document_binary_content_input.yaml +0 -0
  43. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
  44. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
  45. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
  46. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
  47. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
  48. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
  49. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
  50. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
  51. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
  52. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
  53. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
  54. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
  55. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
  56. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
  57. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
  58. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
  59. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
  60. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
  61. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
  62. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
  63. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
  64. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
  65. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
  66. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
  67. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
  68. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
  69. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
  70. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
  71. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/mock_async_stream.py +0 -0
  72. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_fallback.py +0 -0
  73. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_instrumented.py +0 -0
  74. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_model.py +0 -0
  75. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_model_names.py +0 -0
  76. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_model_test.py +0 -0
  77. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/models/test_vertexai.py +0 -0
  78. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/__init__.py +0 -0
  79. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/cassettes/test_azure/test_azure_provider_call.yaml +0 -0
  80. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_anthropic.py +0 -0
  81. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_azure.py +0 -0
  82. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_bedrock.py +0 -0
  83. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_deepseek.py +0 -0
  84. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_google_gla.py +0 -0
  85. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_groq.py +0 -0
  86. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_mistral.py +0 -0
  87. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/providers/test_provider_names.py +0 -0
  88. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_cli.py +0 -0
  89. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_deps.py +0 -0
  90. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_format_as_xml.py +0 -0
  91. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_json_body_serializer.py +0 -0
  92. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_live.py +0 -0
  93. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_messages.py +0 -0
  94. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_parts_manager.py +0 -0
  95. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_streaming.py +0 -0
  96. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_usage_limits.py +0 -0
  97. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/test_utils.py +0 -0
  98. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/typed_agent.py +0 -0
  99. {pydantic_ai-0.0.41 → pydantic_ai-0.0.43}/tests/typed_graph.py +0 -0
@@ -1,5 +1,4 @@
1
1
  site
2
- .python-version
3
2
  .venv
4
3
  dist
5
4
  __pycache__
@@ -16,3 +15,4 @@ examples/pydantic_ai_examples/.chat_app_messages.sqlite
16
15
  .vscode/
17
16
  /question_graph_history.json
18
17
  /docs-site/.wrangler/
18
+ /CLAUDE.md
@@ -27,6 +27,10 @@ lint: ## Lint the code
27
27
  uv run ruff format --check
28
28
  uv run ruff check
29
29
 
30
+ .PHONY: lint-js
31
+ lint-js: ## Lint JS and TS code
32
+ cd mcp-run-python && npm run lint
33
+
30
34
  .PHONY: typecheck-pyright
31
35
  typecheck-pyright:
32
36
  @# PYRIGHT_PYTHON_IGNORE_WARNINGS avoids the overhead of making a request to github on every invocation
@@ -62,6 +66,11 @@ testcov: test ## Run tests and generate a coverage report
62
66
  @echo "building coverage html"
63
67
  @uv run coverage html
64
68
 
69
+ .PHONY: test-mrp
70
+ test-mrp: ## Build and tests of mcp-run-python
71
+ cd mcp-run-python && npm run prepare
72
+ uv run --package mcp-run-python pytest mcp-run-python -v
73
+
65
74
  .PHONY: update-examples
66
75
  update-examples: ## Update documentation examples
67
76
  uv run -m pytest --update-examples tests/test_examples.py
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai
3
- Version: 0.0.41
3
+ Version: 0.0.43
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs
5
5
  Project-URL: Homepage, https://ai.pydantic.dev
6
6
  Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
28
28
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
29
29
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
30
30
  Requires-Python: >=3.9
31
- Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,groq,mistral,openai,vertexai]==0.0.41
31
+ Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,groq,mcp,mistral,openai,vertexai]==0.0.43
32
32
  Provides-Extra: examples
33
- Requires-Dist: pydantic-ai-examples==0.0.41; extra == 'examples'
33
+ Requires-Dist: pydantic-ai-examples==0.0.43; extra == 'examples'
34
34
  Provides-Extra: logfire
35
35
  Requires-Dist: logfire>=2.3; extra == 'logfire'
36
36
  Description-Content-Type: text/markdown
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "pydantic-ai"
7
- version = "0.0.41"
7
+ version = "0.0.43"
8
8
  description = "Agent Framework / shim to use Pydantic with LLMs"
9
9
  authors = [
10
10
  { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
@@ -36,7 +36,7 @@ classifiers = [
36
36
  ]
37
37
  requires-python = ">=3.9"
38
38
  dependencies = [
39
- "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli]==0.0.41",
39
+ "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli,mcp]==0.0.43",
40
40
  ]
41
41
 
42
42
  [project.urls]
@@ -46,16 +46,17 @@ Documentation = "https://ai.pydantic.dev"
46
46
  Changelog = "https://github.com/pydantic/pydantic-ai/releases"
47
47
 
48
48
  [project.optional-dependencies]
49
- examples = ["pydantic-ai-examples==0.0.41"]
49
+ examples = ["pydantic-ai-examples==0.0.43"]
50
50
  logfire = ["logfire>=2.3"]
51
51
 
52
52
  [tool.uv.sources]
53
53
  pydantic-ai-slim = { workspace = true }
54
54
  pydantic-graph = { workspace = true }
55
55
  pydantic-ai-examples = { workspace = true }
56
+ mcp-run-python = { workspace = true }
56
57
 
57
58
  [tool.uv.workspace]
58
- members = ["pydantic_ai_slim", "pydantic_graph", "examples"]
59
+ members = ["pydantic_ai_slim", "pydantic_graph", "examples", "mcp-run-python"]
59
60
 
60
61
  [dependency-groups]
61
62
  # dev dependencies are defined in `pydantic-ai-slim/pyproject.toml` to allow for minimal testing
@@ -82,6 +83,7 @@ line-length = 120
82
83
  target-version = "py39"
83
84
  include = [
84
85
  "pydantic_ai_slim/**/*.py",
86
+ "mcp-run-python/**/*.py",
85
87
  "pydantic_graph/**/*.py",
86
88
  "examples/**/*.py",
87
89
  "tests/**/*.py",
@@ -116,18 +118,22 @@ quote-style = "single"
116
118
  "tests/**/*.py" = ["D"]
117
119
  "docs/**/*.py" = ["D"]
118
120
  "examples/**/*.py" = ["D101", "D103"]
121
+ "mcp-run-python/**/*.py" = ["D", "TID251"]
119
122
 
120
123
  [tool.pyright]
124
+ pythonVersion = "3.12"
121
125
  typeCheckingMode = "strict"
122
126
  reportMissingTypeStubs = false
123
127
  reportUnnecessaryIsInstance = false
124
128
  reportUnnecessaryTypeIgnoreComment = true
125
- include = ["pydantic_ai_slim", "pydantic_graph", "tests", "examples"]
129
+ reportMissingModuleSource = false
130
+ include = ["pydantic_ai_slim", "mcp-run-python", "pydantic_graph", "tests", "examples"]
126
131
  venvPath = ".venv"
127
132
  # see https://github.com/microsoft/pyright/issues/7771 - we don't want to error on decorated functions in tests
128
133
  # which are not otherwise used
129
134
  executionEnvironments = [{ root = "tests", reportUnusedFunction = false }]
130
- exclude = ["examples/pydantic_ai_examples/weather_agent_gradio.py"]
135
+ exclude = ["examples/pydantic_ai_examples/weather_agent_gradio.py", "mcp-run-python/node_modules"]
136
+ extraPaths = ["mcp-run-python/stubs"]
131
137
 
132
138
  [tool.mypy]
133
139
  files = "tests/typed_agent.py,tests/typed_graph.py"
@@ -139,7 +145,11 @@ xfail_strict = true
139
145
  filterwarnings = [
140
146
  "error",
141
147
  # boto3
142
- "ignore::DeprecationWarning:botocore.*"
148
+ "ignore::DeprecationWarning:botocore.*",
149
+ "ignore::RuntimeWarning:pydantic_ai.mcp",
150
+ # uvicorn (mcp server)
151
+ "ignore:websockets.legacy is deprecated.*:DeprecationWarning:websockets.legacy",
152
+ "ignore:websockets.server.WebSocketServerProtocol is deprecated:DeprecationWarning"
143
153
  ]
144
154
 
145
155
  # https://coverage.readthedocs.io/en/latest/config.html#run
@@ -0,0 +1,205 @@
1
+ interactions:
2
+ - request:
3
+ headers:
4
+ accept:
5
+ - application/json
6
+ accept-encoding:
7
+ - gzip, deflate
8
+ connection:
9
+ - keep-alive
10
+ content-length:
11
+ - '530'
12
+ content-type:
13
+ - application/json
14
+ host:
15
+ - api.openai.com
16
+ method: POST
17
+ parsed_body:
18
+ messages:
19
+ - content: What is 0 degrees Celsius in Fahrenheit?
20
+ role: user
21
+ model: gpt-4o
22
+ n: 1
23
+ stream: false
24
+ tool_choice: auto
25
+ tools:
26
+ - function:
27
+ description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n
28
+ \ Temperature in Fahrenheit\n "
29
+ name: celsius_to_fahrenheit
30
+ parameters:
31
+ properties:
32
+ celsius:
33
+ title: Celsius
34
+ type: number
35
+ required:
36
+ - celsius
37
+ title: celsius_to_fahrenheitArguments
38
+ type: object
39
+ type: function
40
+ uri: https://api.openai.com/v1/chat/completions
41
+ response:
42
+ headers:
43
+ access-control-expose-headers:
44
+ - X-Request-ID
45
+ alt-svc:
46
+ - h3=":443"; ma=86400
47
+ connection:
48
+ - keep-alive
49
+ content-length:
50
+ - '1085'
51
+ content-type:
52
+ - application/json
53
+ openai-organization:
54
+ - pydantic-28gund
55
+ openai-processing-ms:
56
+ - '594'
57
+ openai-version:
58
+ - '2020-10-01'
59
+ strict-transport-security:
60
+ - max-age=31536000; includeSubDomains; preload
61
+ transfer-encoding:
62
+ - chunked
63
+ parsed_body:
64
+ choices:
65
+ - finish_reason: tool_calls
66
+ index: 0
67
+ logprobs: null
68
+ message:
69
+ annotations: []
70
+ content: null
71
+ refusal: null
72
+ role: assistant
73
+ tool_calls:
74
+ - function:
75
+ arguments: '{"celsius":0}'
76
+ name: celsius_to_fahrenheit
77
+ id: call_UNesABTXfwIkYdh3HzXWw2wD
78
+ type: function
79
+ created: 1741776872
80
+ id: chatcmpl-BAE1IwTZc7FHM4TkNeBoPylR7rtCT
81
+ model: gpt-4o-2024-08-06
82
+ object: chat.completion
83
+ service_tier: default
84
+ system_fingerprint: fp_eb9dce56a8
85
+ usage:
86
+ completion_tokens: 19
87
+ completion_tokens_details:
88
+ accepted_prediction_tokens: 0
89
+ audio_tokens: 0
90
+ reasoning_tokens: 0
91
+ rejected_prediction_tokens: 0
92
+ prompt_tokens: 82
93
+ prompt_tokens_details:
94
+ audio_tokens: 0
95
+ cached_tokens: 0
96
+ total_tokens: 101
97
+ status:
98
+ code: 200
99
+ message: OK
100
+ - request:
101
+ headers:
102
+ accept:
103
+ - application/json
104
+ accept-encoding:
105
+ - gzip, deflate
106
+ connection:
107
+ - keep-alive
108
+ content-length:
109
+ - '879'
110
+ content-type:
111
+ - application/json
112
+ cookie:
113
+ - __cf_bm=GeO8TCYhlEUIV63eLxM4nKUU2OLlG.f8tMvM9shFTc8-1741776873-1.0.1.1-zxkkWGCAPhJIA05Uwt3Ii3DCg9da6owy45bo_yaZ1YmsoihITJCgZzpA6H4eL0xzFRDWrWkEIQYaFEXLYcrLePwDMsgwNUJbEf6sg1vm2YQ;
114
+ _cfuvid=AI06nwzbBcwVRHXv_BRehX1K7p9oe1qUXFkzXBWEUW0-1741776873043-0.0.1.1-604800000
115
+ host:
116
+ - api.openai.com
117
+ method: POST
118
+ parsed_body:
119
+ messages:
120
+ - content: What is 0 degrees Celsius in Fahrenheit?
121
+ role: user
122
+ - role: assistant
123
+ tool_calls:
124
+ - function:
125
+ arguments: '{"celsius":0}'
126
+ name: celsius_to_fahrenheit
127
+ id: call_UNesABTXfwIkYdh3HzXWw2wD
128
+ type: function
129
+ - content: '{"meta":null,"content":[{"type":"text","text":"32.0","annotations":null}],"isError":false}'
130
+ role: tool
131
+ tool_call_id: call_UNesABTXfwIkYdh3HzXWw2wD
132
+ model: gpt-4o
133
+ n: 1
134
+ stream: false
135
+ tool_choice: auto
136
+ tools:
137
+ - function:
138
+ description: "Convert Celsius to Fahrenheit.\n\n Args:\n celsius: Temperature in Celsius\n\n Returns:\n
139
+ \ Temperature in Fahrenheit\n "
140
+ name: celsius_to_fahrenheit
141
+ parameters:
142
+ properties:
143
+ celsius:
144
+ title: Celsius
145
+ type: number
146
+ required:
147
+ - celsius
148
+ title: celsius_to_fahrenheitArguments
149
+ type: object
150
+ type: function
151
+ uri: https://api.openai.com/v1/chat/completions
152
+ response:
153
+ headers:
154
+ access-control-expose-headers:
155
+ - X-Request-ID
156
+ alt-svc:
157
+ - h3=":443"; ma=86400
158
+ connection:
159
+ - keep-alive
160
+ content-length:
161
+ - '849'
162
+ content-type:
163
+ - application/json
164
+ openai-organization:
165
+ - pydantic-28gund
166
+ openai-processing-ms:
167
+ - '415'
168
+ openai-version:
169
+ - '2020-10-01'
170
+ strict-transport-security:
171
+ - max-age=31536000; includeSubDomains; preload
172
+ transfer-encoding:
173
+ - chunked
174
+ parsed_body:
175
+ choices:
176
+ - finish_reason: stop
177
+ index: 0
178
+ logprobs: null
179
+ message:
180
+ annotations: []
181
+ content: 0 degrees Celsius is 32.0 degrees Fahrenheit.
182
+ refusal: null
183
+ role: assistant
184
+ created: 1741776873
185
+ id: chatcmpl-BAE1Jy3AN974xW1pziTxd6wrxliCE
186
+ model: gpt-4o-2024-08-06
187
+ object: chat.completion
188
+ service_tier: default
189
+ system_fingerprint: fp_eb9dce56a8
190
+ usage:
191
+ completion_tokens: 13
192
+ completion_tokens_details:
193
+ accepted_prediction_tokens: 0
194
+ audio_tokens: 0
195
+ reasoning_tokens: 0
196
+ rejected_prediction_tokens: 0
197
+ prompt_tokens: 139
198
+ prompt_tokens_details:
199
+ audio_tokens: 0
200
+ cached_tokens: 0
201
+ total_tokens: 152
202
+ status:
203
+ code: 200
204
+ message: OK
205
+ version: 1
@@ -393,7 +393,7 @@ async def test_iter_next_error(mock_snapshot_id: object):
393
393
 
394
394
  assert isinstance(n, BaseNode)
395
395
  n = await run.next()
396
- assert n == snapshot(End(None))
396
+ assert n == snapshot(End(data=None))
397
397
 
398
398
  with pytest.raises(TypeError, match=r'`next` must be called with a `BaseNode` instance, got End\(data=None\).'):
399
399
  await run.next()
@@ -287,7 +287,7 @@ async def test_rerun_node(mock_snapshot_id: object):
287
287
  node = Foo()
288
288
  async with graph.iter(node, persistence=sp) as run:
289
289
  end = await run.next()
290
- assert end == snapshot(End(123))
290
+ assert end == snapshot(End(data=123))
291
291
 
292
292
  msg = "Incorrect snapshot status 'success', must be 'created' or 'pending'."
293
293
  with pytest.raises(GraphNodeStatusError, match=msg):
@@ -0,0 +1,19 @@
1
+ from mcp.server.fastmcp import FastMCP
2
+
3
+ mcp = FastMCP('PydanticAI MCP Server')
4
+
5
+
6
+ @mcp.tool()
7
+ async def celsius_to_fahrenheit(celsius: float) -> float:
8
+ """Convert Celsius to Fahrenheit.
9
+
10
+ Args:
11
+ celsius: Temperature in Celsius
12
+
13
+ Returns:
14
+ Temperature in Fahrenheit
15
+ """
16
+ return (celsius * 9 / 5) + 32
17
+
18
+
19
+ mcp.run()
@@ -254,7 +254,7 @@ async def test_request_tool_call(allow_model_requests: None):
254
254
  [
255
255
  ModelRequest(
256
256
  parts=[
257
- SystemPromptPart(content='this is the system prompt'),
257
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
258
258
  UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc)),
259
259
  ]
260
260
  ),
@@ -73,7 +73,10 @@ async def test_bedrock_model(allow_model_requests: None, bedrock_provider: Bedro
73
73
  [
74
74
  ModelRequest(
75
75
  parts=[
76
- SystemPromptPart(content='You are a chatbot.'),
76
+ SystemPromptPart(
77
+ content='You are a chatbot.',
78
+ timestamp=IsDatetime(),
79
+ ),
77
80
  UserPromptPart(
78
81
  content='Hello!',
79
82
  timestamp=IsDatetime(),
@@ -122,7 +125,10 @@ async def test_bedrock_model_structured_response(allow_model_requests: None, bed
122
125
  [
123
126
  ModelRequest(
124
127
  parts=[
125
- SystemPromptPart(content='You are a helpful chatbot.'),
128
+ SystemPromptPart(
129
+ content='You are a helpful chatbot.',
130
+ timestamp=IsDatetime(),
131
+ ),
126
132
  UserPromptPart(
127
133
  content='What was the temperature in London 1st January 2022?',
128
134
  timestamp=IsDatetime(),
@@ -242,7 +248,10 @@ async def test_bedrock_model_retry(allow_model_requests: None, bedrock_provider:
242
248
  [
243
249
  ModelRequest(
244
250
  parts=[
245
- SystemPromptPart(content='You are a helpful chatbot.'),
251
+ SystemPromptPart(
252
+ content='You are a helpful chatbot.',
253
+ timestamp=IsDatetime(),
254
+ ),
246
255
  UserPromptPart(
247
256
  content='What is the capital of France?',
248
257
  timestamp=IsDatetime(),
@@ -259,7 +259,7 @@ async def test_request_tool_call(allow_model_requests: None):
259
259
  [
260
260
  ModelRequest(
261
261
  parts=[
262
- SystemPromptPart(content='this is the system prompt'),
262
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
263
263
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
264
264
  ]
265
265
  ),
@@ -559,7 +559,7 @@ async def test_request_tool_call(get_gemini_client: GetGeminiClient):
559
559
  [
560
560
  ModelRequest(
561
561
  parts=[
562
- SystemPromptPart(content='this is the system prompt'),
562
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
563
563
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
564
564
  ]
565
565
  ),
@@ -273,7 +273,7 @@ async def test_request_tool_call(allow_model_requests: None):
273
273
  [
274
274
  ModelRequest(
275
275
  parts=[
276
- SystemPromptPart(content='this is the system prompt'),
276
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
277
277
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
278
278
  ]
279
279
  ),
@@ -504,7 +504,7 @@ async def test_request_result_type_with_arguments_str_response(allow_model_reque
504
504
  [
505
505
  ModelRequest(
506
506
  parts=[
507
- SystemPromptPart(content='System prompt value'),
507
+ SystemPromptPart(content='System prompt value', timestamp=IsNow(tz=timezone.utc)),
508
508
  UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
509
509
  ]
510
510
  ),
@@ -1070,7 +1070,7 @@ async def test_request_tool_call(allow_model_requests: None):
1070
1070
  [
1071
1071
  ModelRequest(
1072
1072
  parts=[
1073
- SystemPromptPart(content='this is the system prompt'),
1073
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
1074
1074
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
1075
1075
  ]
1076
1076
  ),
@@ -1207,7 +1207,7 @@ async def test_request_tool_call_with_result_type(allow_model_requests: None):
1207
1207
  [
1208
1208
  ModelRequest(
1209
1209
  parts=[
1210
- SystemPromptPart(content='this is the system prompt'),
1210
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
1211
1211
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
1212
1212
  ]
1213
1213
  ),
@@ -1347,7 +1347,7 @@ async def test_stream_tool_call_with_return_type(allow_model_requests: None):
1347
1347
  [
1348
1348
  ModelRequest(
1349
1349
  parts=[
1350
- SystemPromptPart(content='this is the system prompt'),
1350
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
1351
1351
  UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
1352
1352
  ]
1353
1353
  ),
@@ -1447,7 +1447,7 @@ async def test_stream_tool_call(allow_model_requests: None):
1447
1447
  [
1448
1448
  ModelRequest(
1449
1449
  parts=[
1450
- SystemPromptPart(content='this is the system prompt'),
1450
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
1451
1451
  UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
1452
1452
  ]
1453
1453
  ),
@@ -1550,7 +1550,7 @@ async def test_stream_tool_call_with_retry(allow_model_requests: None):
1550
1550
  [
1551
1551
  ModelRequest(
1552
1552
  parts=[
1553
- SystemPromptPart(content='this is the system prompt'),
1553
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
1554
1554
  UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
1555
1555
  ]
1556
1556
  ),
@@ -339,7 +339,7 @@ def test_call_all():
339
339
  [
340
340
  ModelRequest(
341
341
  parts=[
342
- SystemPromptPart(content='foobar'),
342
+ SystemPromptPart(content='foobar', timestamp=IsNow(tz=timezone.utc)),
343
343
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
344
344
  ]
345
345
  ),
@@ -322,7 +322,7 @@ async def test_request_tool_call(allow_model_requests: None):
322
322
  [
323
323
  ModelRequest(
324
324
  parts=[
325
- SystemPromptPart(content='this is the system prompt'),
325
+ SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
326
326
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
327
327
  ]
328
328
  ),
@@ -57,7 +57,10 @@ async def test_google_vertex_provider_auth(allow_model_requests: None, http_clie
57
57
  await provider.client.post('/gemini-1.0-pro:generateContent')
58
58
  assert provider.region == 'us-central1'
59
59
  assert getattr(provider.client.auth, 'project_id') == 'my-project-id'
60
- assert getattr(provider.client.auth, 'token_created') is not None
60
+
61
+
62
+ async def mock_refresh_token():
63
+ return 'my-token'
61
64
 
62
65
 
63
66
  async def test_google_vertex_provider_service_account_file(
@@ -67,11 +70,10 @@ async def test_google_vertex_provider_service_account_file(
67
70
  save_service_account(service_account_path, 'my-project-id')
68
71
 
69
72
  provider = GoogleVertexProvider(service_account_file=service_account_path)
70
- monkeypatch.setattr(provider.client.auth, '_refresh_token', lambda: 'my-token')
73
+ monkeypatch.setattr(provider.client.auth, '_refresh_token', mock_refresh_token)
71
74
  await provider.client.post('/gemini-1.0-pro:generateContent')
72
75
  assert provider.region == 'us-central1'
73
76
  assert getattr(provider.client.auth, 'project_id') == 'my-project-id'
74
- assert getattr(provider.client.auth, 'token_created') is not None
75
77
 
76
78
 
77
79
  async def test_google_vertex_provider_service_account_file_info(
@@ -80,11 +82,10 @@ async def test_google_vertex_provider_service_account_file_info(
80
82
  account_info = prepare_service_account_contents('my-project-id')
81
83
 
82
84
  provider = GoogleVertexProvider(service_account_info=account_info)
83
- monkeypatch.setattr(provider.client.auth, '_refresh_token', lambda: 'my-token')
85
+ monkeypatch.setattr(provider.client.auth, '_refresh_token', mock_refresh_token)
84
86
  await provider.client.post('/gemini-1.0-pro:generateContent')
85
87
  assert provider.region == 'us-central1'
86
88
  assert getattr(provider.client.auth, 'project_id') == 'my-project-id'
87
- assert getattr(provider.client.auth, 'token_created') is not None
88
89
 
89
90
 
90
91
  async def test_google_vertex_provider_service_account_xor(allow_model_requests: None):
@@ -514,7 +514,7 @@ def test_run_with_history_new():
514
514
  [
515
515
  ModelRequest(
516
516
  parts=[
517
- SystemPromptPart(content='Foobar'),
517
+ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
518
518
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
519
519
  ]
520
520
  ),
@@ -538,7 +538,7 @@ def test_run_with_history_new():
538
538
  [
539
539
  ModelRequest(
540
540
  parts=[
541
- SystemPromptPart(content='Foobar'),
541
+ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
542
542
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
543
543
  ]
544
544
  ),
@@ -586,7 +586,7 @@ def test_run_with_history_new():
586
586
  [
587
587
  ModelRequest(
588
588
  parts=[
589
- SystemPromptPart(content='Foobar'),
589
+ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
590
590
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
591
591
  ]
592
592
  ),
@@ -632,7 +632,7 @@ def test_run_with_history_new_structured():
632
632
  [
633
633
  ModelRequest(
634
634
  parts=[
635
- SystemPromptPart(content='Foobar'),
635
+ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
636
636
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
637
637
  ]
638
638
  ),
@@ -670,7 +670,7 @@ def test_run_with_history_new_structured():
670
670
  [
671
671
  ModelRequest(
672
672
  parts=[
673
- SystemPromptPart(content='Foobar'),
673
+ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
674
674
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
675
675
  ],
676
676
  ),
@@ -1374,8 +1374,10 @@ def test_dynamic_false_no_reevaluate():
1374
1374
  [
1375
1375
  ModelRequest(
1376
1376
  parts=[
1377
- SystemPromptPart(content='Foobar', part_kind='system-prompt'),
1378
- SystemPromptPart(content=dynamic_value, part_kind='system-prompt'),
1377
+ SystemPromptPart(content='Foobar', part_kind='system-prompt', timestamp=IsNow(tz=timezone.utc)),
1378
+ SystemPromptPart(
1379
+ content=dynamic_value, part_kind='system-prompt', timestamp=IsNow(tz=timezone.utc)
1380
+ ),
1379
1381
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'),
1380
1382
  ],
1381
1383
  kind='request',
@@ -1397,10 +1399,11 @@ def test_dynamic_false_no_reevaluate():
1397
1399
  [
1398
1400
  ModelRequest(
1399
1401
  parts=[
1400
- SystemPromptPart(content='Foobar', part_kind='system-prompt'),
1402
+ SystemPromptPart(content='Foobar', part_kind='system-prompt', timestamp=IsNow(tz=timezone.utc)),
1401
1403
  SystemPromptPart(
1402
1404
  content='A', # Remains the same
1403
1405
  part_kind='system-prompt',
1406
+ timestamp=IsNow(tz=timezone.utc),
1404
1407
  ),
1405
1408
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'),
1406
1409
  ],
@@ -1446,11 +1449,12 @@ def test_dynamic_true_reevaluate_system_prompt():
1446
1449
  [
1447
1450
  ModelRequest(
1448
1451
  parts=[
1449
- SystemPromptPart(content='Foobar', part_kind='system-prompt'),
1452
+ SystemPromptPart(content='Foobar', part_kind='system-prompt', timestamp=IsNow(tz=timezone.utc)),
1450
1453
  SystemPromptPart(
1451
1454
  content=dynamic_value,
1452
1455
  part_kind='system-prompt',
1453
1456
  dynamic_ref=func.__qualname__,
1457
+ timestamp=IsNow(tz=timezone.utc),
1454
1458
  ),
1455
1459
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'),
1456
1460
  ],
@@ -1473,11 +1477,12 @@ def test_dynamic_true_reevaluate_system_prompt():
1473
1477
  [
1474
1478
  ModelRequest(
1475
1479
  parts=[
1476
- SystemPromptPart(content='Foobar', part_kind='system-prompt'),
1480
+ SystemPromptPart(content='Foobar', part_kind='system-prompt', timestamp=IsNow(tz=timezone.utc)),
1477
1481
  SystemPromptPart(
1478
1482
  content='B',
1479
1483
  part_kind='system-prompt',
1480
1484
  dynamic_ref=func.__qualname__,
1485
+ timestamp=IsNow(tz=timezone.utc),
1481
1486
  ),
1482
1487
  UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'),
1483
1488
  ],