pydantic-ai 0.1.10__tar.gz → 0.1.12__tar.gz

This diff shows the content changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.



Files changed (190)
  1. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/PKG-INFO +3 -3
  2. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/pyproject.toml +9 -4
  3. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/conftest.py +8 -7
  4. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_evaluator_common.py +1 -1
  5. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_evaluators.py +1 -2
  6. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_otel.py +1 -1
  7. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/graph/test_mermaid.py +1 -1
  8. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_anthropic.py +155 -2
  9. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_bedrock.py +49 -1
  10. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_cohere.py +26 -5
  11. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_fallback.py +13 -0
  12. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_gemini.py +73 -2
  13. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_groq.py +10 -1
  14. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_instrumented.py +13 -23
  15. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_mistral.py +28 -1
  16. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_model_function.py +12 -3
  17. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_model_test.py +3 -0
  18. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_openai.py +106 -5
  19. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_openai_responses.py +38 -1
  20. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_google_vertex.py +1 -1
  21. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_provider_names.py +1 -1
  22. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_agent.py +80 -14
  23. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_cli.py +3 -39
  24. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_examples.py +20 -11
  25. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_mcp.py +248 -0
  26. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_streaming.py +27 -4
  27. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_usage_limits.py +5 -0
  28. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/.gitignore +0 -0
  29. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/LICENSE +0 -0
  30. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/Makefile +0 -0
  31. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/README.md +0 -0
  32. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/__init__.py +0 -0
  33. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/assets/dummy.pdf +0 -0
  34. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/assets/kiwi.png +0 -0
  35. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/assets/marcelo.mp3 +0 -0
  36. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/assets/small_video.mp4 +0 -0
  37. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml +0 -0
  38. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_tool_returning_dict.yaml +0 -0
  39. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_tool_returning_error.yaml +0 -0
  40. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_tool_returning_image.yaml +0 -0
  41. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_tool_returning_image_resource.yaml +0 -0
  42. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_tool_returning_multiple_items.yaml +0 -0
  43. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_tool_returning_none.yaml +0 -0
  44. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_tool_returning_str.yaml +0 -0
  45. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_mcp/test_tool_returning_text_resource.yaml +0 -0
  46. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_settings/test_stop_settings[anthropic].yaml +0 -0
  47. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_settings/test_stop_settings[bedrock].yaml +0 -0
  48. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_settings/test_stop_settings[cohere].yaml +0 -0
  49. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_settings/test_stop_settings[gemini].yaml +0 -0
  50. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_settings/test_stop_settings[groq].yaml +0 -0
  51. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_settings/test_stop_settings[mistral].yaml +0 -0
  52. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/cassettes/test_settings/test_stop_settings[openai].yaml +0 -0
  53. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/__init__.py +0 -0
  54. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_dataset.py +0 -0
  55. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_evaluator_base.py +0 -0
  56. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_evaluator_context.py +0 -0
  57. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_evaluator_spec.py +0 -0
  58. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_llm_as_a_judge.py +0 -0
  59. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_render_numbers.py +0 -0
  60. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_reporting.py +0 -0
  61. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_reports.py +0 -0
  62. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_utils.py +0 -0
  63. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/utils.py +0 -0
  64. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/example_modules/README.md +0 -0
  65. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/example_modules/bank_database.py +0 -0
  66. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/example_modules/fake_database.py +0 -0
  67. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/example_modules/weather_service.py +0 -0
  68. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/graph/__init__.py +0 -0
  69. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/graph/test_file_persistence.py +0 -0
  70. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/graph/test_graph.py +0 -0
  71. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/graph/test_persistence.py +0 -0
  72. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/graph/test_state.py +0 -0
  73. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/graph/test_utils.py +0 -0
  74. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/import_examples.py +0 -0
  75. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/json_body_serializer.py +0 -0
  76. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/mcp_server.py +0 -0
  77. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/__init__.py +0 -0
  78. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_anthropic_model_instructions.yaml +0 -0
  79. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_document_binary_content_input.yaml +0 -0
  80. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
  81. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_extra_headers.yaml +0 -0
  82. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_image_as_binary_content_tool_response.yaml +0 -0
  83. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
  84. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
  85. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
  86. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
  87. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_empty_system_prompt.yaml +0 -0
  88. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
  89. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
  90. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_guardrail_config.yaml +0 -0
  91. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_instructions.yaml +0 -0
  92. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
  93. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
  94. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_other_parameters.yaml +0 -0
  95. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_performance_config.yaml +0 -0
  96. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
  97. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
  98. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
  99. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
  100. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_bedrock_multiple_documents_in_history.yaml +0 -0
  101. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
  102. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
  103. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
  104. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
  105. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
  106. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_video_as_binary_content_input.yaml +0 -0
  107. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_bedrock/test_video_url_input.yaml +0 -0
  108. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_cohere/test_cohere_model_instructions.yaml +0 -0
  109. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_cohere/test_request_simple_success_with_vcr.yaml +0 -0
  110. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
  111. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_false.yaml +0 -0
  112. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_true.yaml +0 -0
  113. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_gemini_drop_exclusive_maximum.yaml +0 -0
  114. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_gemini_exclusive_minimum_and_maximum.yaml +0 -0
  115. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_gemini_model_instructions.yaml +0 -0
  116. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
  117. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_image_as_binary_content_tool_response.yaml +0 -0
  118. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
  119. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_video_as_binary_content_input.yaml +0 -0
  120. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_gemini/test_video_url_input.yaml +0 -0
  121. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_groq/test_extra_headers.yaml +0 -0
  122. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_groq/test_groq_model_instructions.yaml +0 -0
  123. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
  124. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_groq/test_image_as_binary_content_tool_response.yaml +0 -0
  125. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
  126. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_mistral/test_image_as_binary_content_tool_response.yaml +0 -0
  127. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_mistral/test_mistral_model_instructions.yaml +0 -0
  128. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
  129. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_document_as_binary_content_input.yaml +0 -0
  130. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
  131. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_extra_headers.yaml +0 -0
  132. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
  133. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_image_as_binary_content_tool_response.yaml +0 -0
  134. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_image_url_tool_response.yaml +0 -0
  135. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4.5-preview].yaml +0 -0
  136. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4o-mini].yaml +0 -0
  137. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_max_completion_tokens[o3-mini].yaml +0 -0
  138. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_multiple_agent_tool_calls.yaml +0 -0
  139. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_openai_audio_url_input.yaml +0 -0
  140. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_openai_instructions.yaml +0 -0
  141. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_openai_instructions_with_tool_calls_keep_instructions.yaml +0 -0
  142. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_openai_model_without_system_prompt.yaml +0 -0
  143. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
  144. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
  145. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai/test_user_id.yaml +0 -0
  146. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_audio_as_binary_content_input.yaml +0 -0
  147. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_input.yaml +0 -0
  148. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_tool_response.yaml +0 -0
  149. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_as_binary_content_input.yaml +0 -0
  150. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_url_input.yaml +0 -0
  151. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_image_url_input.yaml +0 -0
  152. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_builtin_tools.yaml +0 -0
  153. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_http_error.yaml +0 -0
  154. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_instructions.yaml +0 -0
  155. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_retry.yaml +0 -0
  156. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response.yaml +0 -0
  157. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response_with_tool_call.yaml +0 -0
  158. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_output_type.yaml +0 -0
  159. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_effort.yaml +0 -0
  160. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_generate_summary.yaml +0 -0
  161. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_stream.yaml +0 -0
  162. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_system_prompt.yaml +0 -0
  163. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/cassettes/test_openai_responses/test_openai_responses_text_document_url_input.yaml +0 -0
  164. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/mock_async_stream.py +0 -0
  165. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_model.py +0 -0
  166. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_model_names.py +0 -0
  167. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/__init__.py +0 -0
  168. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/cassettes/test_azure/test_azure_provider_call.yaml +0 -0
  169. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/cassettes/test_google_vertex/test_vertexai_provider.yaml +0 -0
  170. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_anthropic.py +0 -0
  171. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_azure.py +0 -0
  172. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_bedrock.py +0 -0
  173. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_cohere.py +0 -0
  174. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_deepseek.py +0 -0
  175. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_google_gla.py +0 -0
  176. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_groq.py +0 -0
  177. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_mistral.py +0 -0
  178. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/providers/test_openai.py +0 -0
  179. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_deps.py +0 -0
  180. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_format_as_xml.py +0 -0
  181. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_json_body_serializer.py +0 -0
  182. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_live.py +0 -0
  183. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_logfire.py +0 -0
  184. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_messages.py +0 -0
  185. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_parts_manager.py +0 -0
  186. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_settings.py +0 -0
  187. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_tools.py +0 -0
  188. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/test_utils.py +0 -0
  189. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/typed_agent.py +0 -0
  190. {pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/typed_graph.py +0 -0

{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai
- Version: 0.1.10
+ Version: 0.1.12
  Summary: Agent Framework / shim to use Pydantic with LLMs
  Project-URL: Homepage, https://ai.pydantic.dev
  Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.9
- Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.1.10
+ Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.1.12
  Provides-Extra: examples
- Requires-Dist: pydantic-ai-examples==0.1.10; extra == 'examples'
+ Requires-Dist: pydantic-ai-examples==0.1.12; extra == 'examples'
  Provides-Extra: logfire
  Requires-Dist: logfire>=3.11.0; extra == 'logfire'
  Description-Content-Type: text/markdown

{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/pyproject.toml
@@ -60,14 +60,13 @@ Documentation = "https://ai.pydantic.dev"
  Changelog = "https://github.com/pydantic/pydantic-ai/releases"

  [project.scripts]
- pai = "pydantic_ai._cli:app"
+ pai = "pydantic_ai._cli:cli_exit" # TODO remove this when clai has been out for a while

  [tool.uv.sources]
  pydantic-ai-slim = { workspace = true }
  pydantic-evals = { workspace = true }
  pydantic-graph = { workspace = true }
  pydantic-ai-examples = { workspace = true }
- mcp-run-python = { workspace = true }

  [tool.uv.workspace]
  members = [
@@ -75,6 +74,7 @@ members = [
  "pydantic_evals",
  "pydantic_graph",
  "mcp-run-python",
+ "clai",
  "examples",
  ]

@@ -106,6 +106,7 @@ include = [
  "pydantic_graph/**/*.py",
  "mcp-run-python/**/*.py",
  "examples/**/*.py",
+ "clai/**/*.py",
  "tests/**/*.py",
  "docs/**/*.py",
  ]
@@ -114,7 +115,7 @@ include = [
  extend-select = [
  "Q",
  "RUF100",
- "RUF018", # https://docs.astral.sh/ruff/rules/assignment-in-assert/
+ "RUF018", # https://docs.astral.sh/ruff/rules/assignment-in-assert/
  "C90",
  "UP",
  "I",
@@ -163,6 +164,7 @@ include = [
  "mcp-run-python",
  "tests",
  "examples",
+ "clai",
  ]
  venvPath = ".venv"
  # see https://github.com/microsoft/pyright/issues/7771 - we don't want to error on decorated functions in tests
@@ -193,7 +195,7 @@ filterwarnings = [
  "ignore:websockets.server.WebSocketServerProtocol is deprecated:DeprecationWarning",
  # random resource warnings; I suspect these are coming from vendor SDKs when running examples..
  "ignore:unclosed <socket:ResourceWarning",
- "ignore:unclosed event loop:ResourceWarning"
+ "ignore:unclosed event loop:ResourceWarning",
  ]

  # https://coverage.readthedocs.io/en/latest/config.html#run
@@ -215,7 +217,10 @@ show_missing = true
  ignore_errors = true
  precision = 2
  exclude_lines = [
+ # `# pragma: no cover` is standard marker for code that's not covered, this will error if code is covered
  'pragma: no cover',
+ # use `# pragma: lax no cover` if you want to ignore cases where (some of) the code is covered
+ 'pragma: lax no cover',
  'raise NotImplementedError',
  'if TYPE_CHECKING:',
  'if typing.TYPE_CHECKING:',
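
Note on the coverage change above: 'pragma: lax no cover' is added as a second exclusion marker next to the standard 'pragma: no cover', and several test files in this release switch between the two. A minimal illustrative sketch of the difference, based only on the comments in the diff (the function names here are hypothetical):

def sometimes_executed() -> str:  # pragma: lax no cover
    # 'lax' marker, per the new comment: excluded from coverage, and it is
    # acceptable if some runs do execute this code (e.g. platform-dependent paths).
    return 'ok'


def never_executed() -> str:  # pragma: no cover
    # strict marker, per the same comment: excluded, and the project's coverage
    # setup errors if this code does turn out to be covered.
    return 'unreachable'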

{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/conftest.py
@@ -24,7 +24,7 @@ import pydantic_ai.models
  from pydantic_ai.messages import BinaryContent
  from pydantic_ai.models import Model, cached_async_http_client

- __all__ = 'IsDatetime', 'IsFloat', 'IsNow', 'IsStr', 'TestEnv', 'ClientWithHandler', 'try_import'
+ __all__ = 'IsDatetime', 'IsFloat', 'IsNow', 'IsStr', 'IsInt', 'TestEnv', 'ClientWithHandler', 'try_import'


  pydantic_ai.models.ALLOW_MODEL_REQUESTS = False
@@ -34,10 +34,11 @@ if TYPE_CHECKING:

  def IsDatetime(*args: Any, **kwargs: Any) -> datetime: ...
  def IsFloat(*args: Any, **kwargs: Any) -> float: ...
+ def IsInt(*args: Any, **kwargs: Any) -> int: ...
  def IsNow(*args: Any, **kwargs: Any) -> datetime: ...
  def IsStr(*args: Any, **kwargs: Any) -> str: ...
  else:
- from dirty_equals import IsDatetime, IsFloat, IsNow as _IsNow, IsStr
+ from dirty_equals import IsDatetime, IsFloat, IsInt, IsNow as _IsNow, IsStr

  def IsNow(*args: Any, **kwargs: Any):
  # Increase the default value of `delta` to 10 to reduce test flakiness on overburdened machines
@@ -59,7 +60,7 @@ class TestEnv:
  def remove(self, name: str) -> None:
  self.envars[name] = os.environ.pop(name, None)

- def reset(self) -> None: # pragma: no cover
+ def reset(self) -> None:
  for name, value in self.envars.items():
  if value is None:
  os.environ.pop(name, None)
@@ -100,7 +101,7 @@ async def client_with_handler() -> AsyncIterator[ClientWithHandler]:
  try:
  yield create_client
  finally:
- if client: # pragma: no cover
+ if client:
  await client.aclose()


@@ -155,7 +156,7 @@ def create_module(tmp_path: Path, request: pytest.FixtureRequest) -> Callable[[s


  @contextmanager
- def try_import() -> Iterator[Callable[[], bool]]: # pragma: no cover
+ def try_import() -> Iterator[Callable[[], bool]]:
  import_success = False

  def check_import() -> bool:
@@ -276,7 +277,7 @@ def mistral_api_key() -> str:


  @pytest.fixture(scope='session')
- def bedrock_provider(): # pragma: no cover
+ def bedrock_provider():
  try:
  import boto3

@@ -304,7 +305,7 @@ def model(
  co_api_key: str,
  gemini_api_key: str,
  bedrock_provider: BedrockProvider,
- ) -> Model: # pragma: no cover
+ ) -> Model: # pragma: lax no cover
  try:
  if request.param == 'openai':
  from pydantic_ai.models.openai import OpenAIModel

{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_evaluator_common.py
@@ -40,7 +40,7 @@ if TYPE_CHECKING or imports_successful():
  self.expected_output = expected_output
  self.inputs = inputs
  self.duration = duration
- else: # pragma: no cover
+ else:
  MockContext = object


{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_evaluators.py
@@ -10,7 +10,6 @@ from pydantic import BaseModel, TypeAdapter
  from pydantic_ai.messages import ModelMessage, ModelResponse
  from pydantic_ai.models import Model, ModelRequestParameters
  from pydantic_ai.settings import ModelSettings
- from pydantic_ai.usage import Usage

  from ..conftest import try_import

@@ -122,7 +121,7 @@ async def test_llm_judge_serialization():
  messages: list[ModelMessage],
  model_settings: ModelSettings | None,
  model_request_parameters: ModelRequestParameters,
- ) -> tuple[ModelResponse, Usage]:
+ ) -> ModelResponse:
  raise NotImplementedError

  @property

{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/evals/test_otel.py
@@ -879,7 +879,7 @@ async def test_context_subtree_invalid_tracer_provider(mocker: MockerFixture):

  mocker.patch('pydantic_evals.otel._context_in_memory_span_exporter.get_tracer_provider', return_value=None)
  with pytest.raises(TypeError) as exc_info:
- with context_subtree(): # pragma: no cover
+ with context_subtree():
  pass
  assert str(exc_info.value) == snapshot(
  "Expected `tracer_provider` to have an `add_span_processor` method; got an instance of <class 'NoneType'>. For help resolving this, please create an issue at https://github.com/pydantic/pydantic-ai/issues."

{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/graph/test_mermaid.py
@@ -259,7 +259,7 @@ def httpx_with_handler() -> Iterator[HttpxWithHandler]:
  try:
  yield create_client
  finally:
- if client: # pragma: no cover
+ if client:
  client.close()


{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_anthropic.py
@@ -6,7 +6,7 @@ from collections.abc import Sequence
  from dataclasses import dataclass, field
  from datetime import timezone
  from functools import cached_property
- from typing import Any, TypeVar, Union, cast
+ from typing import Any, Callable, TypeVar, Union, cast

  import httpx
  import pytest
@@ -52,7 +52,11 @@ with try_import() as imports_successful:
  )
  from anthropic.types.raw_message_delta_event import Delta

- from pydantic_ai.models.anthropic import AnthropicModel, AnthropicModelSettings
+ from pydantic_ai.models.anthropic import (
+ AnthropicModel,
+ AnthropicModelSettings,
+ _map_usage, # pyright: ignore[reportPrivateUsage]
+ )
  from pydantic_ai.providers.anthropic import AnthropicProvider

  # note: we use Union here so that casting works with Python 3.9
@@ -169,12 +173,26 @@ async def test_sync_request_text_response(allow_model_requests: None):
  ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
  ModelResponse(
  parts=[TextPart(content='world')],
+ usage=Usage(
+ requests=1,
+ request_tokens=5,
+ response_tokens=10,
+ total_tokens=15,
+ details={'input_tokens': 5, 'output_tokens': 10},
+ ),
  model_name='claude-3-5-haiku-123',
  timestamp=IsNow(tz=timezone.utc),
  ),
  ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
  ModelResponse(
  parts=[TextPart(content='world')],
+ usage=Usage(
+ requests=1,
+ request_tokens=5,
+ response_tokens=10,
+ total_tokens=15,
+ details={'input_tokens': 5, 'output_tokens': 10},
+ ),
  model_name='claude-3-5-haiku-123',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -258,6 +276,13 @@ async def test_request_structured_response(allow_model_requests: None):
  tool_call_id='123',
  )
  ],
+ usage=Usage(
+ requests=1,
+ request_tokens=3,
+ response_tokens=5,
+ total_tokens=8,
+ details={'input_tokens': 3, 'output_tokens': 5},
+ ),
  model_name='claude-3-5-haiku-123',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -320,6 +345,13 @@ async def test_request_tool_call(allow_model_requests: None):
  tool_call_id='1',
  )
  ],
+ usage=Usage(
+ requests=1,
+ request_tokens=2,
+ response_tokens=1,
+ total_tokens=3,
+ details={'input_tokens': 2, 'output_tokens': 1},
+ ),
  model_name='claude-3-5-haiku-123',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -341,6 +373,13 @@ async def test_request_tool_call(allow_model_requests: None):
  tool_call_id='2',
  )
  ],
+ usage=Usage(
+ requests=1,
+ request_tokens=3,
+ response_tokens=2,
+ total_tokens=5,
+ details={'input_tokens': 3, 'output_tokens': 2},
+ ),
  model_name='claude-3-5-haiku-123',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -356,6 +395,13 @@ async def test_request_tool_call(allow_model_requests: None):
  ),
  ModelResponse(
  parts=[TextPart(content='final response')],
+ usage=Usage(
+ requests=1,
+ request_tokens=3,
+ response_tokens=5,
+ total_tokens=8,
+ details={'input_tokens': 3, 'output_tokens': 5},
+ ),
  model_name='claude-3-5-haiku-123',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -692,6 +738,18 @@ async def test_image_as_binary_content_tool_response(
  TextPart(content='Let me get the image and check what fruit is shown.'),
  ToolCallPart(tool_name='get_image', args={}, tool_call_id='toolu_01VMGXdexE1Fy5xdWgoom9Te'),
  ],
+ usage=Usage(
+ requests=1,
+ request_tokens=372,
+ response_tokens=49,
+ total_tokens=421,
+ details={
+ 'cache_creation_input_tokens': 0,
+ 'cache_read_input_tokens': 0,
+ 'input_tokens': 372,
+ 'output_tokens': 49,
+ },
+ ),
  model_name='claude-3-5-sonnet-20241022',
  timestamp=IsDatetime(),
  ),
@@ -718,6 +776,18 @@ async def test_image_as_binary_content_tool_response(
  content="The image shows a kiwi fruit that has been cut in half, displaying its characteristic bright green flesh with small black seeds arranged in a circular pattern around a white center core. The kiwi's fuzzy brown skin is visible around the edges of the slice."
  )
  ],
+ usage=Usage(
+ requests=1,
+ request_tokens=2025,
+ response_tokens=57,
+ total_tokens=2082,
+ details={
+ 'cache_creation_input_tokens': 0,
+ 'cache_read_input_tokens': 0,
+ 'input_tokens': 2025,
+ 'output_tokens': 57,
+ },
+ ),
  model_name='claude-3-5-sonnet-20241022',
  timestamp=IsDatetime(),
  ),
@@ -838,8 +908,91 @@ async def test_anthropic_model_instructions(allow_model_requests: None, anthropi
  ),
  ModelResponse(
  parts=[TextPart(content='The capital of France is Paris.')],
+ usage=Usage(
+ requests=1,
+ request_tokens=20,
+ response_tokens=10,
+ total_tokens=30,
+ details={
+ 'cache_creation_input_tokens': 0,
+ 'cache_read_input_tokens': 0,
+ 'input_tokens': 20,
+ 'output_tokens': 10,
+ },
+ ),
  model_name='claude-3-opus-20240229',
  timestamp=IsDatetime(),
  ),
  ]
  )
+
+
+ def anth_msg(usage: AnthropicUsage) -> AnthropicMessage:
+ return AnthropicMessage(
+ id='x',
+ content=[],
+ model='claude-3-7-sonnet-latest',
+ role='assistant',
+ type='message',
+ usage=usage,
+ )
+
+
+ @pytest.mark.parametrize(
+ 'message_callback,usage',
+ [
+ pytest.param(
+ lambda: anth_msg(AnthropicUsage(input_tokens=1, output_tokens=1)),
+ snapshot(
+ Usage(
+ request_tokens=1, response_tokens=1, total_tokens=2, details={'input_tokens': 1, 'output_tokens': 1}
+ )
+ ),
+ id='AnthropicMessage',
+ ),
+ pytest.param(
+ lambda: anth_msg(
+ AnthropicUsage(
+ input_tokens=1, output_tokens=1, cache_creation_input_tokens=2, cache_read_input_tokens=3
+ )
+ ),
+ snapshot(
+ Usage(
+ request_tokens=6,
+ response_tokens=1,
+ total_tokens=7,
+ details={
+ 'cache_creation_input_tokens': 2,
+ 'cache_read_input_tokens': 3,
+ 'input_tokens': 1,
+ 'output_tokens': 1,
+ },
+ )
+ ),
+ id='AnthropicMessage-cached',
+ ),
+ pytest.param(
+ lambda: RawMessageStartEvent(
+ message=anth_msg(AnthropicUsage(input_tokens=1, output_tokens=1)), type='message_start'
+ ),
+ snapshot(
+ Usage(
+ request_tokens=1, response_tokens=1, total_tokens=2, details={'input_tokens': 1, 'output_tokens': 1}
+ )
+ ),
+ id='RawMessageStartEvent',
+ ),
+ pytest.param(
+ lambda: RawMessageDeltaEvent(
+ delta=Delta(),
+ usage=MessageDeltaUsage(output_tokens=5),
+ type='message_delta',
+ ),
+ snapshot(Usage(response_tokens=5, total_tokens=5, details={'output_tokens': 5})),
+ id='RawMessageDeltaEvent',
+ ),
+ pytest.param(lambda: RawMessageStopEvent(type='message_stop'), snapshot(Usage()), id='RawMessageStopEvent'),
+ ],
+ )
+ def test_usage(message_callback: Callable[[], AnthropicMessage | RawMessageStreamEvent], usage: Usage):
+ assert _map_usage(message_callback()) == usage
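
The change that recurs throughout the test snapshots above (and in the Bedrock and Cohere diffs below) is that ModelResponse now carries its own usage=Usage(...) field. A minimal sketch of how that surfaces to callers, assuming the public Agent/TestModel API documented by pydantic-ai (illustrative, not taken from this diff):

from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel())
result = agent.run_sync('hello')
print(result.usage())  # aggregate usage for the whole run, as before
for message in result.all_messages():
    # per the snapshots above, each ModelResponse now also records the usage
    # of the individual model call that produced it
    print(type(message).__name__, getattr(message, 'usage', None))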

{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_bedrock.py
@@ -75,6 +75,7 @@ async def test_bedrock_model(allow_model_requests: None, bedrock_provider: Bedro
  content="Hello! How can I assist you today? Whether you have questions, need information, or just want to chat, I'm here to help."
  )
  ],
+ usage=Usage(requests=1, request_tokens=7, response_tokens=30, total_tokens=37),
  model_name='us.amazon.nova-micro-v1:0',
  timestamp=IsDatetime(),
  ),
@@ -132,6 +133,7 @@ async def test_bedrock_model_structured_response(allow_model_requests: None, bed
  tool_call_id='tooluse_5WEci1UmQ8ifMFkUcy2gHQ',
  ),
  ],
+ usage=Usage(requests=1, request_tokens=551, response_tokens=132, total_tokens=683),
  model_name='us.amazon.nova-micro-v1:0',
  timestamp=IsDatetime(),
  ),
@@ -156,6 +158,7 @@ async def test_bedrock_model_structured_response(allow_model_requests: None, bed
  tool_call_id='tooluse_9AjloJSaQDKmpPFff-2Clg',
  ),
  ],
+ usage=Usage(requests=1, request_tokens=685, response_tokens=166, total_tokens=851),
  model_name='us.amazon.nova-micro-v1:0',
  timestamp=IsDatetime(),
  ),
@@ -255,6 +258,7 @@ async def test_bedrock_model_retry(allow_model_requests: None, bedrock_provider:
  tool_call_id='tooluse_F8LnaCMtQ0-chKTnPhNH2g',
  ),
  ],
+ usage=Usage(requests=1, request_tokens=417, response_tokens=69, total_tokens=486),
  model_name='us.amazon.nova-micro-v1:0',
  timestamp=IsDatetime(),
  ),
@@ -278,6 +282,7 @@ I'm sorry, but the tool I have does not support retrieving the capital of France
  """
  )
  ],
+ usage=Usage(requests=1, request_tokens=509, response_tokens=108, total_tokens=617),
  model_name='us.amazon.nova-micro-v1:0',
  timestamp=IsDatetime(),
  ),
@@ -358,7 +363,7 @@ async def test_bedrock_model_iter_stream(allow_model_requests: None, bedrock_pro
  Args:
  city: The city name.
  """
- return '30°C' # pragma: no cover
+ return '30°C'

  event_parts: list[Any] = []
  async with agent.iter(user_prompt='What is the temperature of the capital of France?') as agent_run:
@@ -544,6 +549,7 @@ async def test_bedrock_model_instructions(allow_model_requests: None, bedrock_pr
  content='The capital of France is Paris. Paris is not only the political and economic hub of the country but also a major center for culture, fashion, art, and tourism. It is renowned for its rich history, iconic landmarks such as the Eiffel Tower, Notre-Dame Cathedral, and the Louvre Museum, as well as its influence on global culture and cuisine.'
  )
  ],
+ usage=Usage(requests=1, request_tokens=13, response_tokens=71, total_tokens=84),
  model_name='us.amazon.nova-pro-v1:0',
  timestamp=IsDatetime(),
  ),
@@ -581,3 +587,45 @@ async def test_bedrock_multiple_documents_in_history(
  assert result.output == snapshot(
  'Based on the documents you\'ve shared, both Document 1.pdf and Document 2.pdf contain the text "Dummy PDF file". These appear to be placeholder or sample PDF documents rather than files with substantial content.'
  )
+
+
+ async def test_bedrock_group_consecutive_tool_return_parts(bedrock_provider: BedrockProvider):
+ """
+ Test that consecutive ToolReturnPart objects are grouped into a single user message for Bedrock.
+ """
+ model = BedrockConverseModel('us.amazon.nova-micro-v1:0', provider=bedrock_provider)
+ now = datetime.datetime.now()
+ # Create a ModelRequest with 3 consecutive ToolReturnParts
+ req = [
+ ModelRequest(parts=[UserPromptPart(content=['Hello'])]),
+ ModelResponse(parts=[TextPart(content='Hi')]),
+ ModelRequest(parts=[UserPromptPart(content=['How are you?'])]),
+ ModelResponse(parts=[TextPart(content='Cloudy')]),
+ ModelRequest(
+ parts=[
+ ToolReturnPart(tool_name='tool1', content='result1', tool_call_id='id1', timestamp=now),
+ ToolReturnPart(tool_name='tool2', content='result2', tool_call_id='id2', timestamp=now),
+ ToolReturnPart(tool_name='tool3', content='result3', tool_call_id='id3', timestamp=now),
+ ]
+ ),
+ ]
+
+ # Call the mapping function directly
+ _, bedrock_messages = await model._map_messages(req) # type: ignore[reportPrivateUsage]
+
+ assert bedrock_messages == snapshot(
+ [
+ {'role': 'user', 'content': [{'text': 'Hello'}]},
+ {'role': 'assistant', 'content': [{'text': 'Hi'}]},
+ {'role': 'user', 'content': [{'text': 'How are you?'}]},
+ {'role': 'assistant', 'content': [{'text': 'Cloudy'}]},
+ {
+ 'role': 'user',
+ 'content': [
+ {'toolResult': {'toolUseId': 'id1', 'content': [{'text': 'result1'}], 'status': 'success'}},
+ {'toolResult': {'toolUseId': 'id2', 'content': [{'text': 'result2'}], 'status': 'success'}},
+ {'toolResult': {'toolUseId': 'id3', 'content': [{'text': 'result3'}], 'status': 'success'}},
+ ],
+ },
+ ]
+ )
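
The new test above pins down one behaviour: consecutive ToolReturnPart objects are collapsed into a single Bedrock user message rather than one message per tool result. A generic sketch of that grouping idea using itertools.groupby (illustrative only, not the library's actual _map_messages implementation):

from itertools import groupby

# Toy message parts: (kind, payload). The three consecutive 'tool' parts should
# end up inside one user message, mirroring the snapshot asserted above.
parts = [('text', 'Hello'), ('tool', 'result1'), ('tool', 'result2'), ('tool', 'result3')]

messages = []
for kind, run in groupby(parts, key=lambda p: p[0]):
    payloads = [payload for _, payload in run]
    if kind == 'tool':
        messages.append({'role': 'user', 'content': [{'toolResult': {'content': [{'text': p}]}} for p in payloads]})
    else:
        messages.append({'role': 'user', 'content': [{'text': p} for p in payloads]})

print(messages)  # one user message containing all three tool results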

{pydantic_ai-0.1.10 → pydantic_ai-0.1.12}/tests/models/test_cohere.py
@@ -66,9 +66,7 @@ class MockAsyncClientV2:
  def create_mock(cls, completions: MockChatResponse | Sequence[MockChatResponse]) -> AsyncClientV2:
  return cast(AsyncClientV2, cls(completions=completions))

- async def chat( # pragma: no cover
- self, *_args: Any, **_kwargs: Any
- ) -> ChatResponse:
+ async def chat(self, *_args: Any, **_kwargs: Any) -> ChatResponse:
  assert self.completions is not None
  if isinstance(self.completions, Sequence):
  raise_if_exception(self.completions[self.index])
@@ -115,11 +113,17 @@ async def test_request_simple_success(allow_model_requests: None):
  [
  ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
  ModelResponse(
- parts=[TextPart(content='world')], model_name='command-r7b-12-2024', timestamp=IsNow(tz=timezone.utc)
+ parts=[TextPart(content='world')],
+ usage=Usage(requests=1),
+ model_name='command-r7b-12-2024',
+ timestamp=IsNow(tz=timezone.utc),
  ),
  ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
  ModelResponse(
- parts=[TextPart(content='world')], model_name='command-r7b-12-2024', timestamp=IsNow(tz=timezone.utc)
+ parts=[TextPart(content='world')],
+ usage=Usage(requests=1),
+ model_name='command-r7b-12-2024',
+ timestamp=IsNow(tz=timezone.utc),
  ),
  ]
  )
@@ -187,6 +191,7 @@ async def test_request_structured_response(allow_model_requests: None):
  tool_call_id='123',
  )
  ],
+ usage=Usage(requests=1),
  model_name='command-r7b-12-2024',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -273,6 +278,7 @@ async def test_request_tool_call(allow_model_requests: None):
  tool_call_id='1',
  )
  ],
+ usage=Usage(requests=1, total_tokens=0, details={}),
  model_name='command-r7b-12-2024',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -294,6 +300,13 @@ async def test_request_tool_call(allow_model_requests: None):
  tool_call_id='2',
  )
  ],
+ usage=Usage(
+ requests=1,
+ request_tokens=5,
+ response_tokens=3,
+ total_tokens=8,
+ details={'input_tokens': 4, 'output_tokens': 2},
+ ),
  model_name='command-r7b-12-2024',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -309,6 +322,7 @@ async def test_request_tool_call(allow_model_requests: None):
  ),
  ModelResponse(
  parts=[TextPart(content='final response')],
+ usage=Usage(requests=1),
  model_name='command-r7b-12-2024',
  timestamp=IsNow(tz=timezone.utc),
  ),
@@ -386,6 +400,13 @@ async def test_cohere_model_instructions(allow_model_requests: None, co_api_key:
  content="The capital of France is Paris. It is the country's largest city and serves as the economic, cultural, and political center of France. Paris is known for its rich history, iconic landmarks such as the Eiffel Tower and the Louvre Museum, and its significant influence on fashion, cuisine, and the arts."
  )
  ],
+ usage=Usage(
+ requests=1,
+ request_tokens=542,
+ response_tokens=63,
+ total_tokens=605,
+ details={'input_tokens': 13, 'output_tokens': 61},
+ ),
  model_name='command-r7b-12-2024',
  timestamp=IsDatetime(),
  ),