pydantic-ai 0.1.9__tar.gz → 0.1.11__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that public registry.
Files changed (190)
  1. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/PKG-INFO +3 -3
  2. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/pyproject.toml +9 -4
  3. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/conftest.py +5 -5
  4. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_evaluator_common.py +1 -1
  5. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_otel.py +1 -1
  6. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/graph/test_mermaid.py +1 -1
  7. pydantic_ai-0.1.11/tests/models/cassettes/test_anthropic/test_extra_headers.yaml +57 -0
  8. pydantic_ai-0.1.11/tests/models/cassettes/test_groq/test_extra_headers.yaml +71 -0
  9. pydantic_ai-0.1.11/tests/models/cassettes/test_openai/test_extra_headers.yaml +80 -0
  10. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_anthropic.py +13 -0
  11. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_bedrock.py +1 -1
  12. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_cohere.py +1 -3
  13. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_gemini.py +1 -1
  14. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_groq.py +10 -2
  15. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_mistral.py +1 -1
  16. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_model_function.py +2 -2
  17. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_openai.py +10 -2
  18. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_openai_responses.py +1 -1
  19. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_google_vertex.py +1 -1
  20. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_provider_names.py +1 -1
  21. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_cli.py +3 -39
  22. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_examples.py +19 -10
  23. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_streaming.py +3 -3
  24. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/.gitignore +0 -0
  25. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/LICENSE +0 -0
  26. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/Makefile +0 -0
  27. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/README.md +0 -0
  28. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/__init__.py +0 -0
  29. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/assets/dummy.pdf +0 -0
  30. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/assets/kiwi.png +0 -0
  31. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/assets/marcelo.mp3 +0 -0
  32. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/assets/small_video.mp4 +0 -0
  33. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml +0 -0
  34. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_tool_returning_dict.yaml +0 -0
  35. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_tool_returning_error.yaml +0 -0
  36. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_tool_returning_image.yaml +0 -0
  37. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_tool_returning_image_resource.yaml +0 -0
  38. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_tool_returning_multiple_items.yaml +0 -0
  39. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_tool_returning_none.yaml +0 -0
  40. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_tool_returning_str.yaml +0 -0
  41. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_mcp/test_tool_returning_text_resource.yaml +0 -0
  42. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_settings/test_stop_settings[anthropic].yaml +0 -0
  43. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_settings/test_stop_settings[bedrock].yaml +0 -0
  44. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_settings/test_stop_settings[cohere].yaml +0 -0
  45. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_settings/test_stop_settings[gemini].yaml +0 -0
  46. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_settings/test_stop_settings[groq].yaml +0 -0
  47. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_settings/test_stop_settings[mistral].yaml +0 -0
  48. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/cassettes/test_settings/test_stop_settings[openai].yaml +0 -0
  49. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/__init__.py +0 -0
  50. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_dataset.py +0 -0
  51. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_evaluator_base.py +0 -0
  52. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_evaluator_context.py +0 -0
  53. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_evaluator_spec.py +0 -0
  54. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_evaluators.py +0 -0
  55. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_llm_as_a_judge.py +0 -0
  56. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_render_numbers.py +0 -0
  57. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_reporting.py +0 -0
  58. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_reports.py +0 -0
  59. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/test_utils.py +0 -0
  60. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/evals/utils.py +0 -0
  61. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/example_modules/README.md +0 -0
  62. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/example_modules/bank_database.py +0 -0
  63. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/example_modules/fake_database.py +0 -0
  64. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/example_modules/weather_service.py +0 -0
  65. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/graph/__init__.py +0 -0
  66. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/graph/test_file_persistence.py +0 -0
  67. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/graph/test_graph.py +0 -0
  68. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/graph/test_persistence.py +0 -0
  69. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/graph/test_state.py +0 -0
  70. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/graph/test_utils.py +0 -0
  71. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/import_examples.py +0 -0
  72. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/json_body_serializer.py +0 -0
  73. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/mcp_server.py +0 -0
  74. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/__init__.py +0 -0
  75. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_anthropic/test_anthropic_model_instructions.yaml +0 -0
  76. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_anthropic/test_document_binary_content_input.yaml +0 -0
  77. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
  78. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_anthropic/test_image_as_binary_content_tool_response.yaml +0 -0
  79. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
  80. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
  81. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
  82. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
  83. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_empty_system_prompt.yaml +0 -0
  84. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
  85. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
  86. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_guardrail_config.yaml +0 -0
  87. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_instructions.yaml +0 -0
  88. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
  89. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
  90. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_other_parameters.yaml +0 -0
  91. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_performance_config.yaml +0 -0
  92. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
  93. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
  94. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
  95. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
  96. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_bedrock_multiple_documents_in_history.yaml +0 -0
  97. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
  98. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
  99. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
  100. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
  101. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
  102. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_video_as_binary_content_input.yaml +0 -0
  103. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_bedrock/test_video_url_input.yaml +0 -0
  104. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_cohere/test_cohere_model_instructions.yaml +0 -0
  105. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_cohere/test_request_simple_success_with_vcr.yaml +0 -0
  106. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
  107. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_false.yaml +0 -0
  108. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_true.yaml +0 -0
  109. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_gemini_drop_exclusive_maximum.yaml +0 -0
  110. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_gemini_exclusive_minimum_and_maximum.yaml +0 -0
  111. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_gemini_model_instructions.yaml +0 -0
  112. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
  113. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_image_as_binary_content_tool_response.yaml +0 -0
  114. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
  115. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_video_as_binary_content_input.yaml +0 -0
  116. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_gemini/test_video_url_input.yaml +0 -0
  117. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_groq/test_groq_model_instructions.yaml +0 -0
  118. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
  119. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_groq/test_image_as_binary_content_tool_response.yaml +0 -0
  120. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
  121. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_mistral/test_image_as_binary_content_tool_response.yaml +0 -0
  122. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_mistral/test_mistral_model_instructions.yaml +0 -0
  123. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
  124. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_document_as_binary_content_input.yaml +0 -0
  125. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
  126. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
  127. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_image_as_binary_content_tool_response.yaml +0 -0
  128. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_image_url_tool_response.yaml +0 -0
  129. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4.5-preview].yaml +0 -0
  130. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4o-mini].yaml +0 -0
  131. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_max_completion_tokens[o3-mini].yaml +0 -0
  132. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_multiple_agent_tool_calls.yaml +0 -0
  133. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_openai_audio_url_input.yaml +0 -0
  134. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_openai_instructions.yaml +0 -0
  135. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_openai_instructions_with_tool_calls_keep_instructions.yaml +0 -0
  136. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_openai_model_without_system_prompt.yaml +0 -0
  137. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
  138. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
  139. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai/test_user_id.yaml +0 -0
  140. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_audio_as_binary_content_input.yaml +0 -0
  141. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_input.yaml +0 -0
  142. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_tool_response.yaml +0 -0
  143. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_as_binary_content_input.yaml +0 -0
  144. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_url_input.yaml +0 -0
  145. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_image_url_input.yaml +0 -0
  146. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_builtin_tools.yaml +0 -0
  147. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_http_error.yaml +0 -0
  148. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_instructions.yaml +0 -0
  149. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_retry.yaml +0 -0
  150. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response.yaml +0 -0
  151. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response_with_tool_call.yaml +0 -0
  152. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_output_type.yaml +0 -0
  153. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_effort.yaml +0 -0
  154. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_generate_summary.yaml +0 -0
  155. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_stream.yaml +0 -0
  156. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_system_prompt.yaml +0 -0
  157. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/cassettes/test_openai_responses/test_openai_responses_text_document_url_input.yaml +0 -0
  158. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/mock_async_stream.py +0 -0
  159. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_fallback.py +0 -0
  160. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_instrumented.py +0 -0
  161. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_model.py +0 -0
  162. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_model_names.py +0 -0
  163. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/models/test_model_test.py +0 -0
  164. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/__init__.py +0 -0
  165. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/cassettes/test_azure/test_azure_provider_call.yaml +0 -0
  166. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/cassettes/test_google_vertex/test_vertexai_provider.yaml +0 -0
  167. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_anthropic.py +0 -0
  168. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_azure.py +0 -0
  169. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_bedrock.py +0 -0
  170. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_cohere.py +0 -0
  171. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_deepseek.py +0 -0
  172. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_google_gla.py +0 -0
  173. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_groq.py +0 -0
  174. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_mistral.py +0 -0
  175. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/providers/test_openai.py +0 -0
  176. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_agent.py +0 -0
  177. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_deps.py +0 -0
  178. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_format_as_xml.py +0 -0
  179. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_json_body_serializer.py +0 -0
  180. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_live.py +0 -0
  181. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_logfire.py +0 -0
  182. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_mcp.py +0 -0
  183. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_messages.py +0 -0
  184. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_parts_manager.py +0 -0
  185. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_settings.py +0 -0
  186. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_tools.py +0 -0
  187. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_usage_limits.py +0 -0
  188. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/test_utils.py +0 -0
  189. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/typed_agent.py +0 -0
  190. {pydantic_ai-0.1.9 → pydantic_ai-0.1.11}/tests/typed_graph.py +0 -0
--- pydantic_ai-0.1.9/PKG-INFO
+++ pydantic_ai-0.1.11/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai
- Version: 0.1.9
+ Version: 0.1.11
  Summary: Agent Framework / shim to use Pydantic with LLMs
  Project-URL: Homepage, https://ai.pydantic.dev
  Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.9
- Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.1.9
+ Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.1.11
  Provides-Extra: examples
- Requires-Dist: pydantic-ai-examples==0.1.9; extra == 'examples'
+ Requires-Dist: pydantic-ai-examples==0.1.11; extra == 'examples'
  Provides-Extra: logfire
  Requires-Dist: logfire>=3.11.0; extra == 'logfire'
  Description-Content-Type: text/markdown
--- pydantic_ai-0.1.9/pyproject.toml
+++ pydantic_ai-0.1.11/pyproject.toml
@@ -60,14 +60,13 @@ Documentation = "https://ai.pydantic.dev"
  Changelog = "https://github.com/pydantic/pydantic-ai/releases"

  [project.scripts]
- pai = "pydantic_ai._cli:app"
+ pai = "pydantic_ai._cli:cli_exit" # TODO remove this when clai has been out for a while

  [tool.uv.sources]
  pydantic-ai-slim = { workspace = true }
  pydantic-evals = { workspace = true }
  pydantic-graph = { workspace = true }
  pydantic-ai-examples = { workspace = true }
- mcp-run-python = { workspace = true }

  [tool.uv.workspace]
  members = [
@@ -75,6 +74,7 @@ members = [
  "pydantic_evals",
  "pydantic_graph",
  "mcp-run-python",
+ "clai",
  "examples",
  ]

@@ -106,6 +106,7 @@ include = [
  "pydantic_graph/**/*.py",
  "mcp-run-python/**/*.py",
  "examples/**/*.py",
+ "clai/**/*.py",
  "tests/**/*.py",
  "docs/**/*.py",
  ]
@@ -114,7 +115,7 @@ include = [
  extend-select = [
  "Q",
  "RUF100",
- "RUF018", # https://docs.astral.sh/ruff/rules/assignment-in-assert/
+ "RUF018", # https://docs.astral.sh/ruff/rules/assignment-in-assert/
  "C90",
  "UP",
  "I",
@@ -163,6 +164,7 @@ include = [
  "mcp-run-python",
  "tests",
  "examples",
+ "clai",
  ]
  venvPath = ".venv"
  # see https://github.com/microsoft/pyright/issues/7771 - we don't want to error on decorated functions in tests
@@ -193,7 +195,7 @@ filterwarnings = [
  "ignore:websockets.server.WebSocketServerProtocol is deprecated:DeprecationWarning",
  # random resource warnings; I suspect these are coming from vendor SDKs when running examples..
  "ignore:unclosed <socket:ResourceWarning",
- "ignore:unclosed event loop:ResourceWarning"
+ "ignore:unclosed event loop:ResourceWarning",
  ]

  # https://coverage.readthedocs.io/en/latest/config.html#run
@@ -215,7 +217,10 @@ show_missing = true
  ignore_errors = true
  precision = 2
  exclude_lines = [
+ # `# pragma: no cover` is standard marker for code that's not covered, this will error if code is covered
  'pragma: no cover',
+ # use `# pragma: lax no cover` if you want to ignore cases where (some of) the code is covered
+ 'pragma: lax no cover',
  'raise NotImplementedError',
  'if TYPE_CHECKING:',
  'if typing.TYPE_CHECKING:',
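The coverage hunk above adds a second exclusion marker alongside the standard one. A minimal sketch of how the two pragmas would be used in source code, assuming coverage.py matches the exclude_lines entries above as regexes against each line (the functions here are illustrative, not from the package):

    def reset_env(strict_cleanup: bool) -> None:
        if strict_cleanup:  # pragma: no cover
            # Excluded from coverage; per the comment added to exclude_lines above,
            # the project's tooling treats it as an error if this line is in fact covered.
            raise RuntimeError('unexpected cleanup path')


    def provider_fixture() -> str:  # pragma: lax no cover
        # Also excluded, but coverage here is tolerated: the "lax" marker is meant for
        # code that is covered in some test runs and not in others.
        return 'bedrock'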
--- pydantic_ai-0.1.9/tests/conftest.py
+++ pydantic_ai-0.1.11/tests/conftest.py
@@ -59,7 +59,7 @@ class TestEnv:
  def remove(self, name: str) -> None:
  self.envars[name] = os.environ.pop(name, None)

- def reset(self) -> None: # pragma: no cover
+ def reset(self) -> None:
  for name, value in self.envars.items():
  if value is None:
  os.environ.pop(name, None)
@@ -100,7 +100,7 @@ async def client_with_handler() -> AsyncIterator[ClientWithHandler]:
  try:
  yield create_client
  finally:
- if client: # pragma: no cover
+ if client:
  await client.aclose()


@@ -155,7 +155,7 @@ def create_module(tmp_path: Path, request: pytest.FixtureRequest) -> Callable[[s


  @contextmanager
- def try_import() -> Iterator[Callable[[], bool]]: # pragma: no cover
+ def try_import() -> Iterator[Callable[[], bool]]:
  import_success = False

  def check_import() -> bool:
@@ -276,7 +276,7 @@ def mistral_api_key() -> str:


  @pytest.fixture(scope='session')
- def bedrock_provider(): # pragma: no cover
+ def bedrock_provider():
  try:
  import boto3

@@ -304,7 +304,7 @@ def model(
  co_api_key: str,
  gemini_api_key: str,
  bedrock_provider: BedrockProvider,
- ) -> Model: # pragma: no cover
+ ) -> Model: # pragma: lax no cover
  try:
  if request.param == 'openai':
  from pydantic_ai.models.openai import OpenAIModel
--- pydantic_ai-0.1.9/tests/evals/test_evaluator_common.py
+++ pydantic_ai-0.1.11/tests/evals/test_evaluator_common.py
@@ -40,7 +40,7 @@ if TYPE_CHECKING or imports_successful():
  self.expected_output = expected_output
  self.inputs = inputs
  self.duration = duration
- else: # pragma: no cover
+ else:
  MockContext = object

--- pydantic_ai-0.1.9/tests/evals/test_otel.py
+++ pydantic_ai-0.1.11/tests/evals/test_otel.py
@@ -879,7 +879,7 @@ async def test_context_subtree_invalid_tracer_provider(mocker: MockerFixture):

  mocker.patch('pydantic_evals.otel._context_in_memory_span_exporter.get_tracer_provider', return_value=None)
  with pytest.raises(TypeError) as exc_info:
- with context_subtree(): # pragma: no cover
+ with context_subtree():
  pass
  assert str(exc_info.value) == snapshot(
  "Expected `tracer_provider` to have an `add_span_processor` method; got an instance of <class 'NoneType'>. For help resolving this, please create an issue at https://github.com/pydantic/pydantic-ai/issues."
--- pydantic_ai-0.1.9/tests/graph/test_mermaid.py
+++ pydantic_ai-0.1.11/tests/graph/test_mermaid.py
@@ -259,7 +259,7 @@ def httpx_with_handler() -> Iterator[HttpxWithHandler]:
  try:
  yield create_client
  finally:
- if client: # pragma: no cover
+ if client:
  client.close()

--- /dev/null
+++ pydantic_ai-0.1.11/tests/models/cassettes/test_anthropic/test_extra_headers.yaml
@@ -0,0 +1,57 @@
+ interactions:
+ - request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '202'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ xProxy-Limit-IDs:
+ - monthly_budget
+ method: POST
+ parsed_body:
+ max_tokens: 1024
+ messages:
+ - content:
+ - text: hello
+ type: text
+ role: user
+ model: claude-3-opus-latest
+ stream: false
+ uri: https://api.anthropic.com/v1/messages
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '328'
+ content-type:
+ - application/json
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: Hello! How can I assist you today?
+ type: text
+ id: msg_01U58nruzfn9BrXrrF2hhb4m
+ model: claude-3-5-haiku-latest
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 20
+ output_tokens: 10
+ status:
+ code: 200
+ message: OK
+ version: 1
--- /dev/null
+++ pydantic_ai-0.1.11/tests/models/cassettes/test_groq/test_extra_headers.yaml
@@ -0,0 +1,71 @@
+ interactions:
+ - request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '187'
+ content-type:
+ - application/json
+ host:
+ - api.groq.com
+ xProxy-Limit-IDs:
+ - monthly_budget
+ method: POST
+ parsed_body:
+ messages:
+ - content: hello
+ role: user
+ model: llama-3.3-70b-versatile
+ n: 1
+ stream: false
+ uri: https://api.groq.com/openai/v1/chat/completions
+ response:
+ headers:
+ alt-svc:
+ - h3=":443"; ma=86400
+ cache-control:
+ - private, max-age=0, no-store, no-cache, must-revalidate
+ connection:
+ - keep-alive
+ content-length:
+ - '570'
+ content-type:
+ - application/json
+ transfer-encoding:
+ - chunked
+ vary:
+ - Origin, Accept-Encoding
+ parsed_body:
+ choices:
+ - finish_reason: stop
+ index: 0
+ logprobs: null
+ message:
+ content: Hello! How can I assist you today?
+ role: assistant
+ created: 1744043573
+ id: chatcmpl-7586b6a9-fb4b-4ec7-86a0-59f0a77844cf
+ model: llama-3.3-70b-versatile
+ object: chat.completion
+ system_fingerprint: fp_72a5dc99ee
+ usage:
+ completion_time: 0.029090909
+ completion_tokens: 8
+ prompt_time: 0.002665957
+ prompt_tokens: 48
+ queue_time: 0.100731848
+ total_time: 0.031756866
+ total_tokens: 56
+ usage_breakdown:
+ models: null
+ x_groq:
+ id: req_01jr8hj0hzeq9b86xqb5dn7wqs
+ status:
+ code: 200
+ message: OK
+ version: 1
--- /dev/null
+++ pydantic_ai-0.1.11/tests/models/cassettes/test_openai/test_extra_headers.yaml
@@ -0,0 +1,80 @@
+ interactions:
+ - request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '103'
+ content-type:
+ - application/json
+ host:
+ - api.openai.com
+ xProxy-Limit-IDs:
+ - monthly_budget
+ method: POST
+ parsed_body:
+ messages:
+ - content: hello
+ role: user
+ model: gpt-4o
+ n: 1
+ stream: false
+ uri: https://api.openai.com/v1/chat/completions
+ response:
+ headers:
+ access-control-expose-headers:
+ - X-Request-ID
+ alt-svc:
+ - h3=":443"; ma=86400
+ connection:
+ - keep-alive
+ content-length:
+ - '835'
+ content-type:
+ - application/json
+ openai-organization:
+ - pydantic-28gund
+ openai-processing-ms:
+ - '584'
+ openai-version:
+ - '2020-10-01'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ choices:
+ - finish_reason: stop
+ index: 0
+ logprobs: null
+ message:
+ annotations: []
+ content: Hello! How can I assist you today?
+ refusal: null
+ role: assistant
+ created: 1743073438
+ id: chatcmpl-BFfJeRdAVFPUVWxV3OYH1tSR5KvrI
+ model: gpt-4o-2024-08-06
+ object: chat.completion
+ service_tier: default
+ system_fingerprint: fp_898ac29719
+ usage:
+ completion_tokens: 10
+ completion_tokens_details:
+ accepted_prediction_tokens: 0
+ audio_tokens: 0
+ reasoning_tokens: 0
+ rejected_prediction_tokens: 0
+ prompt_tokens: 8
+ prompt_tokens_details:
+ audio_tokens: 0
+ cached_tokens: 0
+ total_tokens: 18
+ status:
+ code: 200
+ message: OK
+ version: 1
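The three new YAML files above are recorded HTTP exchanges (VCR-style cassettes) that back the test_extra_headers tests added further down, so the suite can replay the requests instead of calling the live APIs. A rough sketch of inspecting one of them, assuming PyYAML is available and that the cassette layout is exactly as shown (interactions → request/response with parsed_body; header values are lists):

    import yaml

    # Path of the new Anthropic cassette (file 7 in the list above).
    path = 'tests/models/cassettes/test_anthropic/test_extra_headers.yaml'
    with open(path) as f:
        cassette = yaml.safe_load(f)

    interaction = cassette['interactions'][0]
    print(interaction['request']['uri'])                          # https://api.anthropic.com/v1/messages
    print(interaction['request']['headers']['xProxy-Limit-IDs'])  # ['monthly_budget']
    print(interaction['response']['parsed_body']['content'][0]['text'])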
--- pydantic_ai-0.1.9/tests/models/test_anthropic.py
+++ pydantic_ai-0.1.11/tests/models/test_anthropic.py
@@ -634,6 +634,19 @@ async def test_image_url_input(allow_model_requests: None, anthropic_api_key: st
  )


+ @pytest.mark.vcr()
+ async def test_extra_headers(allow_model_requests: None, anthropic_api_key: str):
+ # This test doesn't do anything, it's just here to ensure that calls with `extra_headers` don't cause errors, including type.
+ m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(api_key=anthropic_api_key))
+ agent = Agent(
+ m,
+ model_settings=AnthropicModelSettings(
+ anthropic_metadata={'user_id': '123'}, extra_headers={'Extra-Header-Key': 'Extra-Header-Value'}
+ ),
+ )
+ await agent.run('hello')
+
+
  @pytest.mark.vcr()
  async def test_image_url_input_invalid_mime_type(allow_model_requests: None, anthropic_api_key: str):
  m = AnthropicModel('claude-3-5-haiku-latest', provider=AnthropicProvider(api_key=anthropic_api_key))
--- pydantic_ai-0.1.9/tests/models/test_bedrock.py
+++ pydantic_ai-0.1.11/tests/models/test_bedrock.py
@@ -358,7 +358,7 @@ async def test_bedrock_model_iter_stream(allow_model_requests: None, bedrock_pro
  Args:
  city: The city name.
  """
- return '30°C' # pragma: no cover
+ return '30°C'

  event_parts: list[Any] = []
  async with agent.iter(user_prompt='What is the temperature of the capital of France?') as agent_run:
--- pydantic_ai-0.1.9/tests/models/test_cohere.py
+++ pydantic_ai-0.1.11/tests/models/test_cohere.py
@@ -66,9 +66,7 @@ class MockAsyncClientV2:
  def create_mock(cls, completions: MockChatResponse | Sequence[MockChatResponse]) -> AsyncClientV2:
  return cast(AsyncClientV2, cls(completions=completions))

- async def chat( # pragma: no cover
- self, *_args: Any, **_kwargs: Any
- ) -> ChatResponse:
+ async def chat(self, *_args: Any, **_kwargs: Any) -> ChatResponse:
  assert self.completions is not None
  if isinstance(self.completions, Sequence):
  raise_if_exception(self.completions[self.index])
--- pydantic_ai-0.1.9/tests/models/test_gemini.py
+++ pydantic_ai-0.1.11/tests/models/test_gemini.py
@@ -420,7 +420,7 @@ async def get_gemini_client(

  def gemini_response(content: _GeminiContent, finish_reason: Literal['STOP'] | None = 'STOP') -> _GeminiResponse:
  candidate = _GeminiCandidates(content=content, index=0, safety_ratings=[])
- if finish_reason: # pragma: no cover
+ if finish_reason:
  candidate['finish_reason'] = finish_reason
  return _GeminiResponse(candidates=[candidate], usage_metadata=example_usage(), model_version='gemini-1.5-flash-123')

--- pydantic_ai-0.1.9/tests/models/test_groq.py
+++ pydantic_ai-0.1.11/tests/models/test_groq.py
@@ -46,7 +46,7 @@ with try_import() as imports_successful:
  from groq.types.chat.chat_completion_message_tool_call import Function
  from groq.types.completion_usage import CompletionUsage

- from pydantic_ai.models.groq import GroqModel
+ from pydantic_ai.models.groq import GroqModel, GroqModelSettings
  from pydantic_ai.providers.groq import GroqProvider

  # note: we use Union here so that casting works with Python 3.9
@@ -489,7 +489,7 @@ async def test_no_content(allow_model_requests: None):

  with pytest.raises(UnexpectedModelBehavior, match='Received empty model response'):
  async with agent.run_stream(''):
- pass # pragma: no cover
+ pass


  async def test_no_delta(allow_model_requests: None):
@@ -504,6 +504,14 @@ async def test_no_delta(allow_model_requests: None):
  assert result.is_complete


+ @pytest.mark.vcr()
+ async def test_extra_headers(allow_model_requests: None, groq_api_key: str):
+ # This test doesn't do anything, it's just here to ensure that calls with `extra_headers` don't cause errors, including type.
+ m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(api_key=groq_api_key))
+ agent = Agent(m, model_settings=GroqModelSettings(extra_headers={'Extra-Header-Key': 'Extra-Header-Value'}))
+ await agent.run('hello')
+
+
  @pytest.mark.vcr()
  async def test_image_url_input(allow_model_requests: None, groq_api_key: str):
  m = GroqModel('meta-llama/llama-4-scout-17b-16e-instruct', provider=GroqProvider(api_key=groq_api_key))
--- pydantic_ai-0.1.9/tests/models/test_mistral.py
+++ pydantic_ai-0.1.11/tests/models/test_mistral.py
@@ -92,7 +92,7 @@ class MockMistralAI:
  ) -> Mistral:
  return cast(Mistral, cls(stream=completions_streams))

- async def chat_completions_create( # pragma: no cover
+ async def chat_completions_create( # pragma: lax no cover
  self, *_args: Any, stream: bool = False, **_kwargs: Any
  ) -> MistralChatCompletionResponse | MockAsyncStream[MockCompletionEvent]:
  if stream or self.stream:
--- pydantic_ai-0.1.9/tests/models/test_model_function.py
+++ pydantic_ai-0.1.11/tests/models/test_model_function.py
@@ -92,7 +92,7 @@ def test_simple():
  )


- async def weather_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: # pragma: no cover
+ async def weather_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: # pragma: lax no cover
  assert info.allow_text_output
  assert {t.name for t in info.function_tools} == {'get_location', 'get_weather'}
  last = messages[-1].parts[-1]
@@ -194,7 +194,7 @@ def test_weather():
  assert result.output == 'Sunny in Ipswich'


- async def call_function_model(messages: list[ModelMessage], _: AgentInfo) -> ModelResponse: # pragma: no cover
+ async def call_function_model(messages: list[ModelMessage], _: AgentInfo) -> ModelResponse: # pragma: lax no cover
  last = messages[-1].parts[-1]
  if isinstance(last, UserPromptPart):
  if isinstance(last.content, str) and last.content.startswith('{'):
--- pydantic_ai-0.1.9/tests/models/test_openai.py
+++ pydantic_ai-0.1.11/tests/models/test_openai.py
@@ -99,7 +99,7 @@ class MockOpenAI:
  ) -> AsyncOpenAI:
  return cast(AsyncOpenAI, cls(stream=stream))

- async def chat_completions_create( # pragma: no cover
+ async def chat_completions_create( # pragma: lax no cover
  self, *_args: Any, stream: bool = False, **kwargs: Any
  ) -> chat.ChatCompletion | MockAsyncStream[MockChatCompletionChunk]:
  self.chat_completion_kwargs.append({k: v for k, v in kwargs.items() if v is not NOT_GIVEN})
@@ -519,7 +519,7 @@ async def test_no_content(allow_model_requests: None):

  with pytest.raises(UnexpectedModelBehavior, match='Received empty model response'):
  async with agent.run_stream(''):
- pass # pragma: no cover
+ pass


  async def test_no_delta(allow_model_requests: None):
@@ -862,6 +862,14 @@ async def test_multiple_agent_tool_calls(allow_model_requests: None, gemini_api_
  assert result.output == snapshot('The capital of England is London.')


+ @pytest.mark.vcr()
+ async def test_extra_headers(allow_model_requests: None, openai_api_key: str):
+ # This test doesn't do anything, it's just here to ensure that calls with `extra_headers` don't cause errors, including type.
+ m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))
+ agent = Agent(m, model_settings=OpenAIModelSettings(extra_headers={'Extra-Header-Key': 'Extra-Header-Value'}))
+ await agent.run('hello')
+
+
  @pytest.mark.vcr()
  async def test_user_id(allow_model_requests: None, openai_api_key: str):
  # This test doesn't do anything, it's just here to ensure that calls with `user` don't cause errors, including type.
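The three test_extra_headers additions (Anthropic, Groq, and OpenAI above) all exercise the same pattern: arbitrary HTTP headers are passed through the provider-specific model settings. A condensed sketch of that usage outside the test suite, mirroring the OpenAI test above (the API key and header values are placeholders):

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIModel, OpenAIModelSettings
    from pydantic_ai.providers.openai import OpenAIProvider

    # Every request made by this agent should carry the extra header in addition
    # to the headers the OpenAI client sets itself.
    model = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key='sk-...'))
    agent = Agent(
        model,
        model_settings=OpenAIModelSettings(extra_headers={'Extra-Header-Key': 'Extra-Header-Value'}),
    )
    result = agent.run_sync('hello')
    print(result.output)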
--- pydantic_ai-0.1.9/tests/models/test_openai_responses.py
+++ pydantic_ai-0.1.11/tests/models/test_openai_responses.py
@@ -68,7 +68,7 @@ async def test_openai_responses_output_type(allow_model_requests: None, openai_a

  agent = Agent(model=model, output_type=MyOutput)
  result = await agent.run('Give me the name and age of Brazil, Argentina, and Chile.')
- assert result.output == snapshot({'name': 'Brazil', 'age': 2023}) # pragma: no cover
+ assert result.output == snapshot({'name': 'Brazil', 'age': 2023})


  async def test_openai_responses_reasoning_effort(allow_model_requests: None, openai_api_key: str):
--- pydantic_ai-0.1.9/tests/providers/test_google_vertex.py
+++ pydantic_ai-0.1.11/tests/providers/test_google_vertex.py
@@ -147,7 +147,7 @@ def save_service_account(service_account_path: Path, project_id: str) -> None:
  def vertex_provider_auth(mocker: MockerFixture) -> None:
  # Locally, we authenticate via `gcloud` CLI, so we don't need to patch anything.
  if not os.getenv('CI'):
- return # pragma: no cover
+ return # pragma: lax no cover

  @dataclass
  class NoOpCredentials:
--- pydantic_ai-0.1.9/tests/providers/test_provider_names.py
+++ pydantic_ai-0.1.11/tests/providers/test_provider_names.py
@@ -37,7 +37,7 @@ with try_import() as imports_successful:
  ]

  if not imports_successful():
- test_infer_provider_params = [] # pragma: no cover
+ test_infer_provider_params = []

  pytestmark = pytest.mark.skipif(not imports_successful(), reason='need to install all extra packages')

--- pydantic_ai-0.1.9/tests/test_cli.py
+++ pydantic_ai-0.1.11/tests/test_cli.py
@@ -1,5 +1,3 @@
- import os
- import sys
  from io import StringIO
  from typing import Any

@@ -31,40 +29,6 @@ def test_cli_version(capfd: CaptureFixture[str]):
  assert capfd.readouterr().out.startswith('pai - PydanticAI CLI')


- @pytest.mark.skipif(not os.getenv('CI', False), reason="Marcelo can't make this test pass locally")
- @pytest.mark.skipif(sys.version_info >= (3, 13), reason='slightly different output with 3.13')
- def test_cli_help(capfd: CaptureFixture[str]):
- with pytest.raises(SystemExit) as exc:
- cli(['--help'])
- assert exc.value.code == 0
-
- assert capfd.readouterr().out.splitlines() == snapshot(
- [
- 'usage: pai [-h] [-m [MODEL]] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]',
- '',
- IsStr(),
- '',
- 'Special prompt:',
- '* `/exit` - exit the interactive mode',
- '* `/markdown` - show the last markdown output of the last question',
- '* `/multiline` - toggle multiline mode',
- '',
- 'positional arguments:',
- ' prompt AI Prompt, if omitted fall into interactive mode',
- '',
- IsStr(),
- ' -h, --help show this help message and exit',
- ' -m [MODEL], --model [MODEL]',
- ' Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4o". Defaults to "openai:gpt-4o".',
- ' -l, --list-models List all available models and exit',
- ' -t [CODE_THEME], --code-theme [CODE_THEME]',
- ' Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "monokai".',
- ' --no-stream Whether to stream responses from the model',
- ' --version Show version and exit',
- ]
- )
-
-
  def test_invalid_model(capfd: CaptureFixture[str]):
  assert cli(['--model', 'potato']) == 1
  assert capfd.readouterr().out.splitlines() == snapshot(
@@ -169,7 +133,7 @@ def test_code_theme_unset(mocker: MockerFixture, env: TestEnv):
  mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat')
  cli([])
  mock_run_chat.assert_awaited_once_with(
- IsInstance(PromptSession), True, IsInstance(Agent), IsInstance(Console), 'monokai'
+ IsInstance(PromptSession), True, IsInstance(Agent), IsInstance(Console), 'monokai', 'pai'
  )


@@ -178,7 +142,7 @@ def test_code_theme_light(mocker: MockerFixture, env: TestEnv):
  mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat')
  cli(['--code-theme=light'])
  mock_run_chat.assert_awaited_once_with(
- IsInstance(PromptSession), True, IsInstance(Agent), IsInstance(Console), 'default'
+ IsInstance(PromptSession), True, IsInstance(Agent), IsInstance(Console), 'default', 'pai'
  )


@@ -187,5 +151,5 @@ def test_code_theme_dark(mocker: MockerFixture, env: TestEnv):
  mock_run_chat = mocker.patch('pydantic_ai._cli.run_chat')
  cli(['--code-theme=dark'])
  mock_run_chat.assert_awaited_once_with(
- IsInstance(PromptSession), True, IsInstance(Agent), IsInstance(Console), 'monokai'
+ IsInstance(PromptSession), True, IsInstance(Agent), IsInstance(Console), 'monokai', 'pai'
  )