pydantic-ai 0.0.55__tar.gz → 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pydantic-ai might be problematic.

Files changed (164)
  1. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/PKG-INFO +19 -19
  2. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/README.md +16 -16
  3. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/pyproject.toml +5 -2
  4. pydantic_ai-0.1.0/tests/assets/small_video.mp4 +0 -0
  5. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/conftest.py +7 -0
  6. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_dataset.py +11 -11
  7. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_llm_as_a_judge.py +7 -7
  8. pydantic_ai-0.1.0/tests/models/cassettes/test_anthropic/test_anthropic_model_instructions.yaml +56 -0
  9. pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_bedrock_model_guardrail_config.yaml +52 -0
  10. pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_bedrock_model_instructions.yaml +46 -0
  11. pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_bedrock_model_other_parameters.yaml +47 -0
  12. pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_bedrock_model_performance_config.yaml +48 -0
  13. pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_video_as_binary_content_input.yaml +44 -0
  14. pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_video_url_input.yaml +643 -0
  15. pydantic_ai-0.1.0/tests/models/cassettes/test_cohere/test_cohere_model_instructions.yaml +68 -0
  16. pydantic_ai-0.1.0/tests/models/cassettes/test_gemini/test_gemini_model_instructions.yaml +66 -0
  17. pydantic_ai-0.1.0/tests/models/cassettes/test_groq/test_groq_model_instructions.yaml +71 -0
  18. pydantic_ai-0.1.0/tests/models/cassettes/test_mistral/test_mistral_model_instructions.yaml +48 -0
  19. pydantic_ai-0.1.0/tests/models/cassettes/test_openai/test_openai_instructions.yaml +80 -0
  20. pydantic_ai-0.1.0/tests/models/cassettes/test_openai_responses/test_openai_responses_model_instructions.yaml +91 -0
  21. pydantic_ai-0.0.55/tests/models/cassettes/test_openai_responses/test_openai_responses_result_type.yaml → pydantic_ai-0.1.0/tests/models/cassettes/test_openai_responses/test_openai_responses_output_type.yaml +2 -2
  22. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_anthropic.py +38 -13
  23. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_bedrock.py +110 -13
  24. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_cohere.py +38 -8
  25. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_fallback.py +6 -6
  26. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_gemini.py +56 -33
  27. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_groq.py +33 -12
  28. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_instrumented.py +14 -14
  29. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_mistral.py +61 -29
  30. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_model_function.py +27 -27
  31. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_model_test.py +32 -30
  32. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_openai.py +58 -65
  33. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_openai_responses.py +37 -14
  34. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_azure.py +1 -1
  35. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_google_vertex.py +1 -1
  36. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_agent.py +252 -143
  37. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_cli.py +2 -2
  38. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_deps.py +5 -5
  39. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_examples.py +3 -0
  40. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_format_as_xml.py +1 -1
  41. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_live.py +18 -8
  42. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_logfire.py +90 -3
  43. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_mcp.py +1 -1
  44. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_messages.py +30 -1
  45. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_settings.py +2 -2
  46. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_streaming.py +62 -62
  47. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_tools.py +35 -35
  48. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_usage_limits.py +8 -8
  49. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/typed_agent.py +16 -20
  50. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/.gitignore +0 -0
  51. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/LICENSE +0 -0
  52. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/Makefile +0 -0
  53. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/__init__.py +0 -0
  54. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/assets/dummy.pdf +0 -0
  55. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/assets/kiwi.png +0 -0
  56. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/assets/marcelo.mp3 +0 -0
  57. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml +0 -0
  58. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/cassettes/test_settings/test_stop_settings[anthropic].yaml +0 -0
  59. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/cassettes/test_settings/test_stop_settings[bedrock].yaml +0 -0
  60. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/cassettes/test_settings/test_stop_settings[cohere].yaml +0 -0
  61. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/cassettes/test_settings/test_stop_settings[gemini].yaml +0 -0
  62. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/cassettes/test_settings/test_stop_settings[groq].yaml +0 -0
  63. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/cassettes/test_settings/test_stop_settings[mistral].yaml +0 -0
  64. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/cassettes/test_settings/test_stop_settings[openai].yaml +0 -0
  65. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/__init__.py +0 -0
  66. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_evaluator_base.py +0 -0
  67. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_evaluator_common.py +0 -0
  68. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_evaluator_context.py +0 -0
  69. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_evaluator_spec.py +0 -0
  70. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_evaluators.py +0 -0
  71. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_otel.py +0 -0
  72. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_render_numbers.py +0 -0
  73. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_reporting.py +0 -0
  74. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_reports.py +0 -0
  75. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_utils.py +0 -0
  76. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/utils.py +0 -0
  77. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/example_modules/README.md +0 -0
  78. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/example_modules/bank_database.py +0 -0
  79. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/example_modules/fake_database.py +0 -0
  80. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/example_modules/weather_service.py +0 -0
  81. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/graph/__init__.py +0 -0
  82. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/graph/test_file_persistence.py +0 -0
  83. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/graph/test_graph.py +0 -0
  84. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/graph/test_mermaid.py +0 -0
  85. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/graph/test_persistence.py +0 -0
  86. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/graph/test_state.py +0 -0
  87. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/graph/test_utils.py +0 -0
  88. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/import_examples.py +0 -0
  89. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/json_body_serializer.py +0 -0
  90. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/mcp_server.py +0 -0
  91. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/__init__.py +0 -0
  92. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_anthropic/test_document_binary_content_input.yaml +0 -0
  93. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
  94. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
  95. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
  96. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
  97. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
  98. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_empty_system_prompt.yaml +0 -0
  99. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
  100. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
  101. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
  102. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
  103. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
  104. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
  105. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
  106. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
  107. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
  108. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
  109. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
  110. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
  111. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
  112. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_cohere/test_request_simple_success_with_vcr.yaml +0 -0
  113. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
  114. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_gemini/test_gemini_drop_exclusive_maximum.yaml +0 -0
  115. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_gemini/test_gemini_exclusive_minimum_and_maximum.yaml +0 -0
  116. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
  117. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
  118. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
  119. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
  120. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
  121. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
  122. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
  123. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4.5-preview].yaml +0 -0
  124. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4o-mini].yaml +0 -0
  125. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_max_completion_tokens[o3-mini].yaml +0 -0
  126. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_multiple_agent_tool_calls.yaml +0 -0
  127. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_openai_model_without_system_prompt.yaml +0 -0
  128. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
  129. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
  130. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai/test_user_id.yaml +0 -0
  131. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_audio_as_binary_content_input.yaml +0 -0
  132. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_input.yaml +0 -0
  133. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_as_binary_content_input.yaml +0 -0
  134. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_url_input.yaml +0 -0
  135. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_image_url_input.yaml +0 -0
  136. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_builtin_tools.yaml +0 -0
  137. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_http_error.yaml +0 -0
  138. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_retry.yaml +0 -0
  139. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response.yaml +0 -0
  140. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response_with_tool_call.yaml +0 -0
  141. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_effort.yaml +0 -0
  142. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_generate_summary.yaml +0 -0
  143. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_stream.yaml +0 -0
  144. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_system_prompt.yaml +0 -0
  145. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/cassettes/test_openai_responses/test_openai_responses_text_document_url_input.yaml +0 -0
  146. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/mock_async_stream.py +0 -0
  147. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_model.py +0 -0
  148. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/models/test_model_names.py +0 -0
  149. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/__init__.py +0 -0
  150. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/cassettes/test_azure/test_azure_provider_call.yaml +0 -0
  151. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/cassettes/test_google_vertex/test_vertexai_provider.yaml +0 -0
  152. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_anthropic.py +0 -0
  153. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_bedrock.py +0 -0
  154. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_cohere.py +0 -0
  155. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_deepseek.py +0 -0
  156. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_google_gla.py +0 -0
  157. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_groq.py +0 -0
  158. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_mistral.py +0 -0
  159. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_openai.py +0 -0
  160. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/providers/test_provider_names.py +0 -0
  161. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_json_body_serializer.py +0 -0
  162. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_parts_manager.py +0 -0
  163. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/test_utils.py +0 -0
  164. {pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/typed_graph.py +0 -0
{pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai
- Version: 0.0.55
+ Version: 0.1.0
  Summary: Agent Framework / shim to use Pydantic with LLMs
  Project-URL: Homepage, https://ai.pydantic.dev
  Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.9
- Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.0.55
+ Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.1.0
  Provides-Extra: examples
- Requires-Dist: pydantic-ai-examples==0.0.55; extra == 'examples'
+ Requires-Dist: pydantic-ai-examples==0.1.0; extra == 'examples'
  Provides-Extra: logfire
  Requires-Dist: logfire>=3.11.0; extra == 'logfire'
  Description-Content-Type: text/markdown
@@ -86,14 +86,14 @@ Designed to make [type checking](https://ai.pydantic.dev/agents/#static-type-che
  Leverages Python's familiar control flow and agent composition to build your AI-driven projects, making it easy to apply standard Python best practices you'd use in any other (non-AI) project.

  * __Structured Responses__
- Harnesses the power of [Pydantic](https://docs.pydantic.dev/latest/) to [validate and structure](https://ai.pydantic.dev/results/#structured-result-validation) model outputs, ensuring responses are consistent across runs.
+ Harnesses the power of [Pydantic](https://docs.pydantic.dev/latest/) to [validate and structure](https://ai.pydantic.dev/output/#structured-output) model outputs, ensuring responses are consistent across runs.

  * __Dependency Injection System__
- Offers an optional [dependency injection](https://ai.pydantic.dev/dependencies/) system to provide data and services to your agent's [system prompts](https://ai.pydantic.dev/agents/#system-prompts), [tools](https://ai.pydantic.dev/tools/) and [result validators](https://ai.pydantic.dev/results/#result-validators-functions).
+ Offers an optional [dependency injection](https://ai.pydantic.dev/dependencies/) system to provide data and services to your agent's [system prompts](https://ai.pydantic.dev/agents/#system-prompts), [tools](https://ai.pydantic.dev/tools/) and [output validators](https://ai.pydantic.dev/output/#output-validator-functions).
  This is useful for testing and eval-driven iterative development.

  * __Streamed Responses__
- Provides the ability to [stream](https://ai.pydantic.dev/results/#streamed-results) LLM outputs continuously, with immediate validation, ensuring rapid and accurate results.
+ Provides the ability to [stream](https://ai.pydantic.dev/output/#streamed-results) LLM outputs continuously, with immediate validation, ensuring rapid and accurate outputs.

  * __Graph Support__
  [Pydantic Graph](https://ai.pydantic.dev/graph) provides a powerful way to define graphs using typing hints, this is useful in complex applications where standard control flow can degrade to spaghetti code.
@@ -117,7 +117,7 @@ agent = Agent(
  # Here the exchange should be very short: PydanticAI will send the system prompt and the user query to the LLM,
  # the model will return a text response. See below for a more complex run.
  result = agent.run_sync('Where does "hello world" come from?')
- print(result.data)
+ print(result.output)
  """
  The first known use of "hello, world" was in a 1974 textbook about the C programming language.
  """
@@ -150,22 +150,22 @@ class SupportDependencies:
  db: DatabaseConn


- # This pydantic model defines the structure of the result returned by the agent.
- class SupportResult(BaseModel):
+ # This pydantic model defines the structure of the output returned by the agent.
+ class SupportOutput(BaseModel):
  support_advice: str = Field(description='Advice returned to the customer')
  block_card: bool = Field(description="Whether to block the customer's card")
  risk: int = Field(description='Risk level of query', ge=0, le=10)


  # This agent will act as first-tier support in a bank.
- # Agents are generic in the type of dependencies they accept and the type of result they return.
- # In this case, the support agent has type `Agent[SupportDependencies, SupportResult]`.
+ # Agents are generic in the type of dependencies they accept and the type of output they return.
+ # In this case, the support agent has type `Agent[SupportDependencies, SupportOutput]`.
  support_agent = Agent(
  'openai:gpt-4o',
  deps_type=SupportDependencies,
- # The response from the agent will, be guaranteed to be a SupportResult,
+ # The response from the agent will, be guaranteed to be a SupportOutput,
  # if validation fails the agent is prompted to try again.
- result_type=SupportResult,
+ output_type=SupportOutput,
  system_prompt=(
  'You are a support agent in our bank, give the '
  'customer support and judge the risk level of their query.'
@@ -187,7 +187,7 @@ async def add_customer_name(ctx: RunContext[SupportDependencies]) -> str:
  # Pydantic is used to validate these arguments, and errors are passed back to the LLM so it can retry.
  @support_agent.tool
  async def customer_balance(
- ctx: RunContext[SupportDependencies], include_pending: bool
+ ctx: RunContext[SupportDependencies], include_pending: bool
  ) -> float:
  """Returns the customer's current account balance."""
  # The docstring of a tool is also passed to the LLM as the description of the tool.
@@ -205,17 +205,17 @@ async def customer_balance(
  async def main():
  deps = SupportDependencies(customer_id=123, db=DatabaseConn())
  # Run the agent asynchronously, conducting a conversation with the LLM until a final response is reached.
- # Even in this fairly simple case, the agent will exchange multiple messages with the LLM as tools are called to retrieve a result.
+ # Even in this fairly simple case, the agent will exchange multiple messages with the LLM as tools are called to retrieve an output.
  result = await support_agent.run('What is my balance?', deps=deps)
- # The result will be validated with Pydantic to guarantee it is a `SupportResult`, since the agent is generic,
- # it'll also be typed as a `SupportResult` to aid with static type checking.
- print(result.data)
+ # The `result.output` will be validated with Pydantic to guarantee it is a `SupportOutput`. Since the agent is generic,
+ # it'll also be typed as a `SupportOutput` to aid with static type checking.
+ print(result.output)
  """
  support_advice='Hello John, your current account balance, including pending transactions, is $123.45.' block_card=False risk=1
  """

  result = await support_agent.run('I just lost my card!', deps=deps)
- print(result.data)
+ print(result.output)
  """
  support_advice="I'm sorry to hear that, John. We are temporarily blocking your card to prevent unauthorized transactions." block_card=True risk=8
  """
{pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/README.md
@@ -49,14 +49,14 @@ Designed to make [type checking](https://ai.pydantic.dev/agents/#static-type-che
  Leverages Python's familiar control flow and agent composition to build your AI-driven projects, making it easy to apply standard Python best practices you'd use in any other (non-AI) project.

  * __Structured Responses__
- Harnesses the power of [Pydantic](https://docs.pydantic.dev/latest/) to [validate and structure](https://ai.pydantic.dev/results/#structured-result-validation) model outputs, ensuring responses are consistent across runs.
+ Harnesses the power of [Pydantic](https://docs.pydantic.dev/latest/) to [validate and structure](https://ai.pydantic.dev/output/#structured-output) model outputs, ensuring responses are consistent across runs.

  * __Dependency Injection System__
- Offers an optional [dependency injection](https://ai.pydantic.dev/dependencies/) system to provide data and services to your agent's [system prompts](https://ai.pydantic.dev/agents/#system-prompts), [tools](https://ai.pydantic.dev/tools/) and [result validators](https://ai.pydantic.dev/results/#result-validators-functions).
+ Offers an optional [dependency injection](https://ai.pydantic.dev/dependencies/) system to provide data and services to your agent's [system prompts](https://ai.pydantic.dev/agents/#system-prompts), [tools](https://ai.pydantic.dev/tools/) and [output validators](https://ai.pydantic.dev/output/#output-validator-functions).
  This is useful for testing and eval-driven iterative development.

  * __Streamed Responses__
- Provides the ability to [stream](https://ai.pydantic.dev/results/#streamed-results) LLM outputs continuously, with immediate validation, ensuring rapid and accurate results.
+ Provides the ability to [stream](https://ai.pydantic.dev/output/#streamed-results) LLM outputs continuously, with immediate validation, ensuring rapid and accurate outputs.

  * __Graph Support__
  [Pydantic Graph](https://ai.pydantic.dev/graph) provides a powerful way to define graphs using typing hints, this is useful in complex applications where standard control flow can degrade to spaghetti code.
@@ -80,7 +80,7 @@ agent = Agent(
  # Here the exchange should be very short: PydanticAI will send the system prompt and the user query to the LLM,
  # the model will return a text response. See below for a more complex run.
  result = agent.run_sync('Where does "hello world" come from?')
- print(result.data)
+ print(result.output)
  """
  The first known use of "hello, world" was in a 1974 textbook about the C programming language.
  """
@@ -113,22 +113,22 @@ class SupportDependencies:
  db: DatabaseConn


- # This pydantic model defines the structure of the result returned by the agent.
- class SupportResult(BaseModel):
+ # This pydantic model defines the structure of the output returned by the agent.
+ class SupportOutput(BaseModel):
  support_advice: str = Field(description='Advice returned to the customer')
  block_card: bool = Field(description="Whether to block the customer's card")
  risk: int = Field(description='Risk level of query', ge=0, le=10)


  # This agent will act as first-tier support in a bank.
- # Agents are generic in the type of dependencies they accept and the type of result they return.
- # In this case, the support agent has type `Agent[SupportDependencies, SupportResult]`.
+ # Agents are generic in the type of dependencies they accept and the type of output they return.
+ # In this case, the support agent has type `Agent[SupportDependencies, SupportOutput]`.
  support_agent = Agent(
  'openai:gpt-4o',
  deps_type=SupportDependencies,
- # The response from the agent will, be guaranteed to be a SupportResult,
+ # The response from the agent will, be guaranteed to be a SupportOutput,
  # if validation fails the agent is prompted to try again.
- result_type=SupportResult,
+ output_type=SupportOutput,
  system_prompt=(
  'You are a support agent in our bank, give the '
  'customer support and judge the risk level of their query.'
@@ -150,7 +150,7 @@ async def add_customer_name(ctx: RunContext[SupportDependencies]) -> str:
  # Pydantic is used to validate these arguments, and errors are passed back to the LLM so it can retry.
  @support_agent.tool
  async def customer_balance(
- ctx: RunContext[SupportDependencies], include_pending: bool
+ ctx: RunContext[SupportDependencies], include_pending: bool
  ) -> float:
  """Returns the customer's current account balance."""
  # The docstring of a tool is also passed to the LLM as the description of the tool.
@@ -168,17 +168,17 @@ async def customer_balance(
  async def main():
  deps = SupportDependencies(customer_id=123, db=DatabaseConn())
  # Run the agent asynchronously, conducting a conversation with the LLM until a final response is reached.
- # Even in this fairly simple case, the agent will exchange multiple messages with the LLM as tools are called to retrieve a result.
+ # Even in this fairly simple case, the agent will exchange multiple messages with the LLM as tools are called to retrieve an output.
  result = await support_agent.run('What is my balance?', deps=deps)
- # The result will be validated with Pydantic to guarantee it is a `SupportResult`, since the agent is generic,
- # it'll also be typed as a `SupportResult` to aid with static type checking.
- print(result.data)
+ # The `result.output` will be validated with Pydantic to guarantee it is a `SupportOutput`. Since the agent is generic,
+ # it'll also be typed as a `SupportOutput` to aid with static type checking.
+ print(result.output)
  """
  support_advice='Hello John, your current account balance, including pending transactions, is $123.45.' block_card=False risk=1
  """

  result = await support_agent.run('I just lost my card!', deps=deps)
- print(result.data)
+ print(result.output)
  """
  support_advice="I'm sorry to hear that, John. We are temporarily blocking your card to prevent unauthorized transactions." block_card=True risk=8
  """
{pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/pyproject.toml
@@ -83,10 +83,9 @@ members = [
  lint = ["mypy>=1.11.2", "pyright>=1.1.390", "ruff>=0.6.9"]
  docs = [
  "black>=24.10.0",
- "bs4>=0.0.2",
- "markdownify>=0.14.1",
  "mkdocs>=1.6.1",
  "mkdocs-glightbox>=0.4.0",
+ "mkdocs-llmstxt>=0.2.0",
  "mkdocs-material[imaging]>=9.5.45",
  "mkdocstrings-python>=1.12.2",
  ]
@@ -192,6 +191,9 @@ filterwarnings = [
  # uvicorn (mcp server)
  "ignore:websockets.legacy is deprecated.*:DeprecationWarning:websockets.legacy",
  "ignore:websockets.server.WebSocketServerProtocol is deprecated:DeprecationWarning",
+ # random resource warnings; I suspect these are coming from vendor SDKs when running examples..
+ "ignore:unclosed <socket:ResourceWarning",
+ "ignore:unclosed event loop:ResourceWarning"
  ]

  # https://coverage.readthedocs.io/en/latest/config.html#run
@@ -218,6 +220,7 @@ exclude_lines = [
  'if TYPE_CHECKING:',
  'if typing.TYPE_CHECKING:',
  '@overload',
+ '@deprecated',
  '@typing.overload',
  '@abstractmethod',
  '\(Protocol\):$',
{pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/conftest.py
@@ -211,6 +211,7 @@ async def close_cached_httpx_client() -> AsyncIterator[None]:
  'mistral',
  'cohere',
  'deepseek',
+ None,
  ]:
  await cached_async_http_client(provider=provider).aclose()

@@ -232,6 +233,12 @@ def image_content(assets_path: Path) -> BinaryContent:
  return BinaryContent(data=image_bytes, media_type='image/png')


+ @pytest.fixture(scope='session')
+ def video_content(assets_path: Path) -> BinaryContent:
+ video_bytes = assets_path.joinpath('small_video.mp4').read_bytes()
+ return BinaryContent(data=video_bytes, media_type='video/mp4')
+
+
  @pytest.fixture(scope='session')
  def document_content(assets_path: Path) -> BinaryContent:
  pdf_bytes = assets_path.joinpath('dummy.pdf').read_bytes()
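The new `video_content` fixture, together with the `small_video.mp4` asset and the Bedrock `test_video_as_binary_content_input` / `test_video_url_input` cassettes in the file list, indicates that 0.1.0 adds video input support. A minimal usage sketch, assuming a video `BinaryContent` part can be passed in the user prompt list the same way image and document content already is; the model choice and prompt are illustrative, not taken from the diff:

from pathlib import Path

from pydantic_ai import Agent, BinaryContent

agent = Agent('bedrock:us.amazon.nova-pro-v1:0')  # illustrative model

# Build a video part the same way the test fixture does.
video = BinaryContent(
    data=Path('tests/assets/small_video.mp4').read_bytes(),
    media_type='video/mp4',
)

result = agent.run_sync(['What is happening in this video?', video])
print(result.output)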
{pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_dataset.py
@@ -688,8 +688,8 @@ async def test_duplicate_evaluator_failure(example_dataset: Dataset[TaskInput, T
  assert str(exc_info.value) == snapshot("Duplicate evaluator class name: 'FirstEvaluator'")


- async def test_invalid_evaluator_result_type(example_dataset: Dataset[TaskInput, TaskOutput, TaskMetadata]):
- """Test that an invalid evaluator result type raises an error."""
+ async def test_invalid_evaluator_output_type(example_dataset: Dataset[TaskInput, TaskOutput, TaskMetadata]):
+ """Test that an invalid evaluator output type raises an error."""
  invalid_evaluator = Python(expression='...')
  example_dataset.add_evaluator(invalid_evaluator)

@@ -996,31 +996,31 @@ def test_import_generate_dataset():
  def test_evaluate_non_serializable_inputs():
  @dataclass
  class MyInputs:
- result_type: type[str] | type[int]
+ output_type: type[str] | type[int]

  my_dataset = Dataset[MyInputs, Any, Any](
  cases=[
  Case(
  name='str',
- inputs=MyInputs(result_type=str),
+ inputs=MyInputs(output_type=str),
  expected_output='abc',
  ),
  Case(
  name='int',
- inputs=MyInputs(result_type=int),
+ inputs=MyInputs(output_type=int),
  expected_output=123,
  ),
  ],
  )

  async def my_task(my_inputs: MyInputs) -> int | str:
- if issubclass(my_inputs.result_type, str):
- return my_inputs.result_type('abc')
+ if issubclass(my_inputs.output_type, str):
+ return my_inputs.output_type('abc')
  else:
- return my_inputs.result_type(123)
+ return my_inputs.output_type(123)

  report = my_dataset.evaluate_sync(task=my_task)
- assert [c.inputs for c in report.cases] == snapshot([MyInputs(result_type=str), MyInputs(result_type=int)])
+ assert [c.inputs for c in report.cases] == snapshot([MyInputs(output_type=str), MyInputs(output_type=int)])

  table = report.console_table(include_input=True)
  assert render_table(table) == snapshot("""\
@@ -1028,9 +1028,9 @@ def test_evaluate_non_serializable_inputs():
  ┏━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┓
  ┃ Case ID ┃ Inputs ┃ Duration ┃
  ┡━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━┩
- │ str │ test_evaluate_non_serializable_inputs.<locals>.MyInputs(result_type=<class 'str'>) │ 1.0s │
+ │ str │ test_evaluate_non_serializable_inputs.<locals>.MyInputs(output_type=<class 'str'>) │ 1.0s │
  ├──────────┼────────────────────────────────────────────────────────────────────────────────────┼──────────┤
- │ int │ test_evaluate_non_serializable_inputs.<locals>.MyInputs(result_type=<class 'int'>) │ 1.0s │
+ │ int │ test_evaluate_non_serializable_inputs.<locals>.MyInputs(output_type=<class 'int'>) │ 1.0s │
  ├──────────┼────────────────────────────────────────────────────────────────────────────────────┼──────────┤
  │ Averages │ │ 1.0s │
  └──────────┴────────────────────────────────────────────────────────────────────────────────────┴──────────┘
{pydantic_ai-0.0.55 → pydantic_ai-0.1.0}/tests/evals/test_llm_as_a_judge.py
@@ -70,15 +70,15 @@ async def test_judge_output_mock(mocker: MockerFixture):
  """Test judge_output function with mocked agent."""
  # Mock the agent run method
  mock_result = mocker.MagicMock()
- mock_result.data = GradingOutput(reason='Test passed', pass_=True, score=1.0)
+ mock_result.output = GradingOutput(reason='Test passed', pass_=True, score=1.0)
  mock_run = mocker.patch('pydantic_ai.Agent.run', return_value=mock_result)

  # Test with string output
- result = await judge_output('Hello world', 'Content contains a greeting')
- assert isinstance(result, GradingOutput)
- assert result.reason == 'Test passed'
- assert result.pass_ is True
- assert result.score == 1.0
+ grading_output = await judge_output('Hello world', 'Content contains a greeting')
+ assert isinstance(grading_output, GradingOutput)
+ assert grading_output.reason == 'Test passed'
+ assert grading_output.pass_ is True
+ assert grading_output.score == 1.0

  # Verify the agent was called with correct prompt
  mock_run.assert_called_once()
@@ -92,7 +92,7 @@ async def test_judge_input_output_mock(mocker: MockerFixture):
  """Test judge_input_output function with mocked agent."""
  # Mock the agent run method
  mock_result = mocker.MagicMock()
- mock_result.data = GradingOutput(reason='Test passed', pass_=True, score=1.0)
+ mock_result.output = GradingOutput(reason='Test passed', pass_=True, score=1.0)
  mock_run = mocker.patch('pydantic_ai.Agent.run', return_value=mock_result)

  # Test with string input and output
pydantic_ai-0.1.0/tests/models/cassettes/test_anthropic/test_anthropic_model_instructions.yaml
@@ -0,0 +1,56 @@
+ interactions:
+ - request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '202'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 1024
+ messages:
+ - content:
+ - text: What is the capital of France?
+ type: text
+ role: user
+ model: claude-3-opus-latest
+ stream: false
+ system: You are a helpful assistant.
+ uri: https://api.anthropic.com/v1/messages
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '328'
+ content-type:
+ - application/json
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: The capital of France is Paris.
+ type: text
+ id: msg_01U58nruzfn9BrXrrF2hhb4m
+ model: claude-3-opus-20240229
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 20
+ output_tokens: 10
+ status:
+ code: 200
+ message: OK
+ version: 1
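This cassette, like the matching new `*_model_instructions` cassettes for Bedrock, Cohere, Gemini, Groq, Mistral and OpenAI in the file list, records a run where the instructions feature being tested in 0.1.0 ends up in the provider's system field. A minimal sketch of the kind of call such a test presumably drives, assuming an Agent `instructions` argument that maps to the `system` text recorded above; treat the exact keyword as an assumption inferred from the test names:

from pydantic_ai import Agent

# Assumption: `instructions` is forwarded as the Anthropic system prompt,
# which is what the recorded request above shows.
agent = Agent(
    'anthropic:claude-3-opus-latest',
    instructions='You are a helpful assistant.',
)

result = agent.run_sync('What is the capital of France?')
print(result.output)
# The recorded response in this cassette is 'The capital of France is Paris.'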
pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_bedrock_model_guardrail_config.yaml
@@ -0,0 +1,52 @@
+ interactions:
+ - request:
+ body: '{"messages": [{"role": "user", "content": [{"text": "What is the capital of France?"}]}], "system": [{"text": "You
+ are a helpful chatbot."}], "guardrailConfig": {"guardrailIdentifier": "guardrailv1", "guardrailVersion": "v1", "trace": "enabled"}'
+ headers:
+ amz-sdk-invocation-id:
+ - !!binary |
+ ZmZhYjMyZmItODRjOS00YWZjLWE4NTAtNTQ4OTUxMjI5NmU4
+ amz-sdk-request:
+ - !!binary |
+ YXR0ZW1wdD0x
+ content-length:
+ - '178'
+ content-type:
+ - !!binary |
+ YXBwbGljYXRpb24vanNvbg==
+ method: POST
+ uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.amazon.nova-micro-v1%3A0/converse
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '206'
+ content-type:
+ - application/json
+ parsed_body:
+ metrics:
+ latencyMs: 159
+ output:
+ message:
+ content:
+ - text: The capital of France is Paris. Paris is not only the capital city but also the most populous city in France, known for its significant cultural,
+ political, and economic influence both within the country and globally. It is famous for landmarks such as the Eiffel Tower, the Louvre Museum,
+ and the Notre-Dame Cathedral, among many other historical and architectural treasures.
+
+ role: assistant
+ stopReason: max_tokens
+ usage:
+ inputTokens: 13
+ outputTokens: 5
+ totalTokens: 18
+ trace:
+ guardrail:
+ modelOutput:
+ - test
+ performanceConfig:
+ latency: optimized
+ status:
+ code: 200
+ message: OK
+ version: 1
pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_bedrock_model_instructions.yaml
@@ -0,0 +1,46 @@
+ interactions:
+ - request:
+ body: '{"messages": [{"role": "user", "content": [{"text": "What is the capital of France?"}]}], "system": [{"text": "You
+ are a helpful assistant."}], "inferenceConfig": {}}'
+ headers:
+ amz-sdk-invocation-id:
+ - !!binary |
+ YmM2NjFmZjctZDZhYi00NzI5LWIyNTQtNjc5MzA1MGMyOGU3
+ amz-sdk-request:
+ - !!binary |
+ YXR0ZW1wdD0x
+ content-length:
+ - '166'
+ content-type:
+ - !!binary |
+ YXBwbGljYXRpb24vanNvbg==
+ method: POST
+ uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.amazon.nova-pro-v1%3A0/converse
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '521'
+ content-type:
+ - application/json
+ parsed_body:
+ metrics:
+ latencyMs: 863
+ output:
+ message:
+ content:
+ - text: The capital of France is Paris. Paris is not only the political and economic hub of the country but also
+ a major center for culture, fashion, art, and tourism. It is renowned for its rich history, iconic landmarks
+ such as the Eiffel Tower, Notre-Dame Cathedral, and the Louvre Museum, as well as its influence on global culture
+ and cuisine.
+ role: assistant
+ stopReason: end_turn
+ usage:
+ inputTokens: 13
+ outputTokens: 71
+ totalTokens: 84
+ status:
+ code: 200
+ message: OK
+ version: 1
pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_bedrock_model_other_parameters.yaml
@@ -0,0 +1,47 @@
+ interactions:
+ - request:
+ body: '{"messages": [{"role": "user", "content": [{"text": "What is the capital of France?"}]}], "system": [{"text": "You
+ are a helpful chatbot."}], "requestMetadata": {"test": "test"}, "additionalModelResponseFieldPaths": ["test"], "additionalModelRequestFields": {"test": "test"}, "promptVariables": {"leo": {"text": "aaaa"}}'
+ headers:
+ amz-sdk-invocation-id:
+ - !!binary |
+ ZmZhYjMyZmItODRjOS00YWZjLWE4NTAtNTQ4OTUxMjI5NmU4
+ amz-sdk-request:
+ - !!binary |
+ YXR0ZW1wdD0x
+ content-length:
+ - '178'
+ content-type:
+ - !!binary |
+ YXBwbGljYXRpb24vanNvbg==
+ method: POST
+ uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.amazon.nova-micro-v1%3A0/converse
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '206'
+ content-type:
+ - application/json
+ parsed_body:
+ metrics:
+ latencyMs: 159
+ output:
+ message:
+ content:
+ - text: The capital of France is Paris. Paris is not only the capital city but also the most populous city in France, known for its significant cultural,
+ political, and economic influence both within the country and globally. It is famous for landmarks such as the Eiffel Tower, the Louvre Museum,
+ and the Notre-Dame Cathedral, among many other historical and architectural treasures.
+
+ role: assistant
+ stopReason: max_tokens
+ usage:
+ inputTokens: 13
+ outputTokens: 5
+ totalTokens: 18
+ additionalModelResponseFields: "test"
+ status:
+ code: 200
+ message: OK
+ version: 1
pydantic_ai-0.1.0/tests/models/cassettes/test_bedrock/test_bedrock_model_performance_config.yaml
@@ -0,0 +1,48 @@
+ interactions:
+ - request:
+ body: '{"messages": [{"role": "user", "content": [{"text": "What is the capital of France?"}]}], "system": [{"text": "You
+ are a helpful chatbot."}], "performanceConfig": {"latency": "optimized"}}'
+ headers:
+ amz-sdk-invocation-id:
+ - !!binary |
+ ZmZhYjMyZmItODRjOS00YWZjLWE4NTAtNTQ4OTUxMjI5NmU4
+ amz-sdk-request:
+ - !!binary |
+ YXR0ZW1wdD0x
+ content-length:
+ - '178'
+ content-type:
+ - !!binary |
+ YXBwbGljYXRpb24vanNvbg==
+ method: POST
+ uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.amazon.nova-micro-v1%3A0/converse
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '206'
+ content-type:
+ - application/json
+ parsed_body:
+ metrics:
+ latencyMs: 159
+ output:
+ message:
+ content:
+ - text: The capital of France is Paris. Paris is not only the capital city but also the most populous city in France, known for its significant cultural,
+ political, and economic influence both within the country and globally. It is famous for landmarks such as the Eiffel Tower, the Louvre Museum,
+ and the Notre-Dame Cathedral, among many other historical and architectural treasures.
+
+ role: assistant
+ stopReason: max_tokens
+ usage:
+ inputTokens: 13
+ outputTokens: 5
+ totalTokens: 18
+ performanceConfig:
+ latency: optimized
+ status:
+ code: 200
+ message: OK
+ version: 1