pydantic-ai 0.8.0__tar.gz → 0.8.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of pydantic-ai might be problematic.
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/PKG-INFO +3 -3
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/conftest.py +3 -3
- pydantic_ai-0.8.1/tests/models/mock_openai.py +161 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_anthropic.py +23 -23
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_cohere.py +2 -2
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_deepseek.py +1 -1
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_fallback.py +4 -4
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_gemini.py +16 -16
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_gemini_vertex.py +2 -2
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_google.py +28 -27
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_groq.py +22 -18
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_huggingface.py +14 -14
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_mistral.py +32 -32
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_model.py +4 -1
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_openai.py +41 -112
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_openai_responses.py +60 -24
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_agent.py +3 -3
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_cli.py +1 -1
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_examples.py +1 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_logfire.py +136 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_mcp.py +27 -27
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_streaming.py +7 -7
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/typed_agent.py +1 -1
- pydantic_ai-0.8.0/tests/models/cassettes/test_openai_responses/test_openai_responses_usage_without_tokens_details.yaml +0 -96
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/.gitignore +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/LICENSE +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/Makefile +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/README.md +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/pyproject.toml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/__init__.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/assets/dummy.pdf +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/assets/kiwi.png +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/assets/marcelo.mp3 +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/assets/product_name.txt +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/assets/small_video.mp4 +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_agent_with_server_not_running.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_audio_resource.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_audio_resource_link.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_dict.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_error.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_image.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_image_resource.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_image_resource_link.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_multiple_items.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_none.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_str.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_text_resource.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_mcp/test_tool_returning_text_resource_link.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_settings/test_stop_settings[anthropic].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_settings/test_stop_settings[bedrock].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_settings/test_stop_settings[cohere].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_settings/test_stop_settings[gemini].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_settings/test_stop_settings[google].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_settings/test_stop_settings[groq].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_settings/test_stop_settings[mistral].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_settings/test_stop_settings[openai].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_complex_agent_run.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_complex_agent_run_in_workflow.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_complex_agent_run_stream_in_workflow.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_logfire_plugin.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_multiple_agents.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_simple_agent_run_in_workflow.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_iter.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_override_deps_in_workflow.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_run.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_run_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_run_sync.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_sync_tool_activity_disabled.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_with_dataclass_deps_as_dict.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_with_non_dict_deps.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/cassettes/test_temporal/test_temporal_agent_with_unserializable_deps_type.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/__init__.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_dataset.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_evaluator_base.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_evaluator_common.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_evaluator_context.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_evaluator_spec.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_evaluators.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_llm_as_a_judge.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_otel.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_render_numbers.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_reporting.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_reports.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/test_utils.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/evals/utils.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/example_modules/README.md +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/example_modules/bank_database.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/example_modules/fake_database.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/example_modules/mcp_server.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/example_modules/weather_service.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/ext/__init__.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/ext/test_langchain.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/graph/__init__.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/graph/test_file_persistence.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/graph/test_graph.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/graph/test_mermaid.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/graph/test_persistence.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/graph/test_state.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/graph/test_utils.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/import_examples.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/json_body_serializer.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/mcp_server.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/__init__.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_code_execution_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_model_empty_message_on_history.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_model_thinking_part_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_prompted_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_prompted_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_server_tool_pass_history_to_another_provider.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_server_tool_receive_history_from_another_provider.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_text_output_function.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_tool_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_tool_with_thinking.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_web_search_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_anthropic_web_search_tool_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_document_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_extra_headers.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_anthropic_tool_with_thinking.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_empty_system_prompt.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_guardrail_config.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_other_parameters.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_performance_config.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_thinking_part_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_bedrock_multiple_documents_in_history.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_video_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_bedrock/test_video_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_cohere/test_cohere_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_cohere/test_cohere_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_cohere/test_request_simple_success_with_vcr.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_deepseek/test_deepseek_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_deepseek/test_deepseek_model_thinking_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_download_item/test_download_item_application_octet_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_download_item/test_download_item_audio_mpeg.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_download_item/test_download_item_no_content_type.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_false.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_true.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_drop_exclusive_maximum.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_exclusive_minimum_and_maximum.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_native_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_native_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_prompted_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_prompted_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_prompted_output_with_tools.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_text_output_function.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_tool_config_any_with_tool_without_args.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_tool_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_gemini_youtube_video_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_labels_are_ignored_with_gla_provider.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_video_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini/test_video_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_labels.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[AudioUrl (gs)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[AudioUrl].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[DocumentUrl (gs)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[DocumentUrl].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[ImageUrl (gs)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[ImageUrl].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[VideoUrl (YouTube)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[VideoUrl (gs)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input[VideoUrl].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_gemini_vertex/test_url_input_force_download.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_code_execution_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_empty_assistant_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_empty_user_prompt.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_image_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_iter_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_max_tokens.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_multiple_documents_in_history.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_receive_web_search_history_from_another_provider.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_retry.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_safety_settings.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_server_tool_receive_history_from_another_provider.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_structured_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_text_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_thinking_config.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_thinking_part_iter.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_top_p.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_url_context_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_usage_limit_exceeded.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_usage_limit_not_exceeded.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_vertex_labels.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_vertex_provider.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_video_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_video_as_binary_content_input_with_vendor_metadata.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_video_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_web_search_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_model_youtube_video_url_input_with_vendor_metadata.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_native_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_native_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_prompted_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_prompted_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_prompted_output_with_tools.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_text_output_function.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_timeout.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_tool_config_any_with_tool_without_args.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_tool_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[AudioUrl (gs)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[AudioUrl].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[DocumentUrl (gs)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[DocumentUrl].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[ImageUrl (gs)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[ImageUrl].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[VideoUrl (YouTube)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[VideoUrl (gs)].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input[VideoUrl].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_url_input_force_download.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_google/test_google_vertexai_model_usage_limit_exceeded.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_groq/test_extra_headers.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_groq/test_groq_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_groq/test_groq_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_groq/test_groq_model_thinking_part_iter.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_groq/test_groq_model_web_search_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_groq/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_hf_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_hf_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_hf_model_thinking_part_iter.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_max_completion_tokens[Qwen-Qwen2.5-72B-Instruct].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_max_completion_tokens[deepseek-ai-DeepSeek-R1-0528].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_max_completion_tokens[meta-llama-Llama-3.3-70B-Instruct].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_request_simple_success_with_vcr.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_request_simple_usage.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_simple_completion.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_huggingface/test_stream_completion.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_mistral/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_mistral/test_mistral_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_mistral/test_mistral_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_model_names/test_known_model_names.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_compatible_api_with_tool_calls_without_id.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_document_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_document_as_binary_content_input_with_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_extra_headers.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_image_url_tool_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_invalid_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4.5-preview].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4o-mini].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_max_completion_tokens[o3-mini].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_multiple_agent_tool_calls.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_audio_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_instructions_with_tool_calls_keep_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_model_cerebras_provider.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_model_cerebras_provider_harmony.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_model_cerebras_provider_qwen_3_coder.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_model_settings_temperature_ignored_on_gpt_5.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_model_thinking_part_iter.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_model_without_system_prompt.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_native_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_native_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_prompted_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_prompted_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_responses_model_thinking_part.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_text_output_function.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_tool_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_web_search_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_web_search_tool_model_not_supported.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_openai_web_search_tool_with_user_location.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_reasoning_model_with_temperature.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_system_prompt_role_o1_mini.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_text_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_user_id.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai/test_valid_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_audio_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_native_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_native_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_code_execution_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_code_execution_tool_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_image_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_builtin_tools.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_http_error.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_instructions.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_retry.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response_with_tool_call.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_web_search_tool.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_web_search_tool_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_web_search_tool_with_invalid_region.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_web_search_tool_with_user_location.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_output_type.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_effort.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_generate_summary.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_stream.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_system_prompt.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_verbosity.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_prompted_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_prompted_output_multiple.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_reasoning_model_with_temperature.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_text_output_function.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/cassettes/test_openai_responses/test_tool_output.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/mock_async_stream.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_bedrock.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_download_item.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_instrumented.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_mcp_sampling.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_model_function.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_model_names.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_model_request_parameters.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_model_settings.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/models/test_model_test.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/parts_from_messages.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/__init__.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/cassettes/test_azure/test_azure_provider_call.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/cassettes/test_google_vertex/test_vertexai_provider.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/cassettes/test_heroku/test_heroku_model_provider_claude_3_7_sonnet.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/cassettes/test_openrouter/test_openrouter_with_google_model.yaml +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_anthropic.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_azure.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_bedrock.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_cerebras.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_cohere.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_deepseek.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_fireworks.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_github.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_google_gla.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_google_vertex.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_grok.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_groq.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_heroku.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_huggingface.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_mistral.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_moonshotai.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_ollama.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_openai.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_openrouter.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_provider_names.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_together.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/providers/test_vercel.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_a2a.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_ag_ui.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_builtin_tools.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_deps.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_direct.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_format_as_xml.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_history_processor.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_json_body_serializer.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_live.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_messages.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_parts_manager.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_settings.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_temporal.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_tenacity.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_thinking_part.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_tools.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_toolsets.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_usage_limits.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/test_utils.py +0 -0
- {pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/typed_graph.py +0 -0
{pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.8.0
+Version: 0.8.1
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,11 +28,11 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[ag-ui,anthropic,bedrock,cli,cohere,evals,google,groq,huggingface,mcp,mistral,openai,retries,temporal,vertexai]==0.8.0
+Requires-Dist: pydantic-ai-slim[ag-ui,anthropic,bedrock,cli,cohere,evals,google,groq,huggingface,mcp,mistral,openai,retries,temporal,vertexai]==0.8.1
 Provides-Extra: a2a
 Requires-Dist: fasta2a>=0.4.1; extra == 'a2a'
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.8.0; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.8.1; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.14.1; extra == 'logfire'
 Description-Content-Type: text/markdown

{pydantic_ai-0.8.0 → pydantic_ai-0.8.1}/tests/conftest.py

@@ -414,7 +414,7 @@ def bedrock_provider():
         pytest.skip('boto3 is not installed')


-@pytest.fixture(
+@pytest.fixture()
 def vertex_provider_auth(mocker: MockerFixture) -> None:  # pragma: lax no cover
     # Locally, we authenticate via `gcloud` CLI, so we don't need to patch anything.
     if not os.getenv('CI', False):
@@ -423,7 +423,7 @@ def vertex_provider_auth(mocker: MockerFixture) -> None:  # pragma: lax no cover
     try:
         from google.genai import _api_client
     except ImportError:
-
+        return  # do nothing if this isn't installed

     @dataclass
     class NoOpCredentials:
@@ -440,7 +440,7 @@ def vertex_provider_auth(mocker: MockerFixture) -> None:  # pragma: lax no cover


 @pytest.fixture()
-async def vertex_provider():  # pragma: lax no cover
+async def vertex_provider(vertex_provider_auth: None):  # pragma: lax no cover
     # NOTE: You need to comment out this line to rewrite the cassettes locally.
     if not os.getenv('CI', False):
         pytest.skip('Requires properly configured local google vertex config to pass')

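The last hunk makes `vertex_provider` depend on `vertex_provider_auth` by declaring it as a parameter, so pytest sets up auth before the provider fixture runs. A minimal sketch of that fixture-dependency pattern; the bodies below are illustrative stand-ins (the real fixtures are async and build the actual Vertex provider):

```python
import pytest


@pytest.fixture()
def vertex_provider_auth() -> None:
    # Stand-in for patching Google credentials; runs before any fixture that requests it.
    pass


@pytest.fixture()
def vertex_provider(vertex_provider_auth: None) -> str:
    # pytest resolves `vertex_provider_auth` first, so auth is already configured here.
    return 'provider-instance'  # placeholder for the real provider object


def test_uses_provider(vertex_provider: str) -> None:
    assert vertex_provider == 'provider-instance'
```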
pydantic_ai-0.8.1/tests/models/mock_openai.py (new file)

@@ -0,0 +1,161 @@
+from __future__ import annotations as _annotations
+
+from collections.abc import Sequence
+from dataclasses import dataclass, field
+from functools import cached_property
+from typing import Any, Union, cast
+
+from ..conftest import raise_if_exception, try_import
+from .mock_async_stream import MockAsyncStream
+
+with try_import() as imports_successful:
+    from openai import NOT_GIVEN, AsyncOpenAI
+    from openai.types import chat, responses
+    from openai.types.chat.chat_completion import Choice, ChoiceLogprobs
+    from openai.types.chat.chat_completion_message import ChatCompletionMessage
+    from openai.types.completion_usage import CompletionUsage
+    from openai.types.responses.response import ResponseUsage
+    from openai.types.responses.response_output_item import ResponseOutputItem
+
+# note: we use Union here so that casting works with Python 3.9
+MockChatCompletion = Union[chat.ChatCompletion, Exception]
+MockChatCompletionChunk = Union[chat.ChatCompletionChunk, Exception]
+MockResponse = Union[responses.Response, Exception]
+MockResponseStreamEvent = Union[responses.ResponseStreamEvent, Exception]
+
+
+@dataclass
+class MockOpenAI:
+    completions: MockChatCompletion | Sequence[MockChatCompletion] | None = None
+    stream: Sequence[MockChatCompletionChunk] | Sequence[Sequence[MockChatCompletionChunk]] | None = None
+    index: int = 0
+    chat_completion_kwargs: list[dict[str, Any]] = field(default_factory=list)
+
+    @cached_property
+    def chat(self) -> Any:
+        chat_completions = type('Completions', (), {'create': self.chat_completions_create})
+        return type('Chat', (), {'completions': chat_completions})
+
+    @classmethod
+    def create_mock(cls, completions: MockChatCompletion | Sequence[MockChatCompletion]) -> AsyncOpenAI:
+        return cast(AsyncOpenAI, cls(completions=completions))
+
+    @classmethod
+    def create_mock_stream(
+        cls,
+        stream: Sequence[MockChatCompletionChunk] | Sequence[Sequence[MockChatCompletionChunk]],
+    ) -> AsyncOpenAI:
+        return cast(AsyncOpenAI, cls(stream=stream))
+
+    async def chat_completions_create(  # pragma: lax no cover
+        self, *_args: Any, stream: bool = False, **kwargs: Any
+    ) -> chat.ChatCompletion | MockAsyncStream[MockChatCompletionChunk]:
+        self.chat_completion_kwargs.append({k: v for k, v in kwargs.items() if v is not NOT_GIVEN})
+
+        if stream:
+            assert self.stream is not None, 'you can only used `stream=True` if `stream` is provided'
+            if isinstance(self.stream[0], Sequence):
+                response = MockAsyncStream(iter(cast(list[MockChatCompletionChunk], self.stream[self.index])))
+            else:
+                response = MockAsyncStream(iter(cast(list[MockChatCompletionChunk], self.stream)))
+        else:
+            assert self.completions is not None, 'you can only used `stream=False` if `completions` are provided'
+            if isinstance(self.completions, Sequence):
+                raise_if_exception(self.completions[self.index])
+                response = cast(chat.ChatCompletion, self.completions[self.index])
+            else:
+                raise_if_exception(self.completions)
+                response = cast(chat.ChatCompletion, self.completions)
+        self.index += 1
+        return response
+
+
+def get_mock_chat_completion_kwargs(async_open_ai: AsyncOpenAI) -> list[dict[str, Any]]:
+    if isinstance(async_open_ai, MockOpenAI):
+        return async_open_ai.chat_completion_kwargs
+    else:  # pragma: no cover
+        raise RuntimeError('Not a MockOpenAI instance')
+
+
+def completion_message(
+    message: ChatCompletionMessage, *, usage: CompletionUsage | None = None, logprobs: ChoiceLogprobs | None = None
+) -> chat.ChatCompletion:
+    choices = [Choice(finish_reason='stop', index=0, message=message)]
+    if logprobs:
+        choices = [Choice(finish_reason='stop', index=0, message=message, logprobs=logprobs)]
+    return chat.ChatCompletion(
+        id='123',
+        choices=choices,
+        created=1704067200,  # 2024-01-01
+        model='gpt-4o-123',
+        object='chat.completion',
+        usage=usage,
+    )
+
+
+@dataclass
+class MockOpenAIResponses:
+    response: MockResponse | Sequence[MockResponse] | None = None
+    stream: Sequence[MockResponseStreamEvent] | Sequence[Sequence[MockResponseStreamEvent]] | None = None
+    index: int = 0
+    response_kwargs: list[dict[str, Any]] = field(default_factory=list)
+
+    @cached_property
+    def responses(self) -> Any:
+        return type('Responses', (), {'create': self.responses_create})
+
+    @classmethod
+    def create_mock(cls, responses: MockResponse | Sequence[MockResponse]) -> AsyncOpenAI:
+        return cast(AsyncOpenAI, cls(response=responses))
+
+    @classmethod
+    def create_mock_stream(
+        cls,
+        stream: Sequence[MockResponseStreamEvent] | Sequence[Sequence[MockResponseStreamEvent]],
+    ) -> AsyncOpenAI:
+        return cast(AsyncOpenAI, cls(stream=stream))  # pragma: lax no cover
+
+    async def responses_create(  # pragma: lax no cover
+        self, *_args: Any, stream: bool = False, **kwargs: Any
+    ) -> responses.Response | MockAsyncStream[MockResponseStreamEvent]:
+        self.response_kwargs.append({k: v for k, v in kwargs.items() if v is not NOT_GIVEN})
+
+        if stream:
+            assert self.stream is not None, 'you can only used `stream=True` if `stream` is provided'
+            if isinstance(self.stream[0], Sequence):
+                response = MockAsyncStream(iter(cast(list[MockResponseStreamEvent], self.stream[self.index])))
+            else:
+                response = MockAsyncStream(iter(cast(list[MockResponseStreamEvent], self.stream)))
+        else:
+            assert self.response is not None, 'you can only used `stream=False` if `response` are provided'
+            if isinstance(self.response, Sequence):
+                raise_if_exception(self.response[self.index])
+                response = cast(responses.Response, self.response[self.index])
+            else:
+                raise_if_exception(self.response)
+                response = cast(responses.Response, self.response)
+        self.index += 1
+        return response
+
+
+def get_mock_responses_kwargs(async_open_ai: AsyncOpenAI) -> list[dict[str, Any]]:
+    if isinstance(async_open_ai, MockOpenAIResponses):  # pragma: lax no cover
+        return async_open_ai.response_kwargs
+    else:  # pragma: no cover
+        raise RuntimeError('Not a MockOpenAIResponses instance')
+
+
+def response_message(
+    output_items: Sequence[ResponseOutputItem], *, usage: ResponseUsage | None = None
+) -> responses.Response:
+    return responses.Response(
+        id='123',
+        model='gpt-4o-123',
+        object='response',
+        created_at=1704067200,  # 2024-01-01
+        output=list(output_items),
+        parallel_tool_calls=True,
+        tool_choice='auto',
+        tools=[],
+        usage=usage,
+    )

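This new `mock_openai.py` module lets the OpenAI model tests run without network access by handing a fake `AsyncOpenAI` client to the model under test. A rough sketch of how a test might use it; the `OpenAIModel`/`OpenAIProvider` wiring below follows pydantic-ai's public API but is illustrative rather than copied from the test suite, and the import path is assumed:

```python
from openai.types.chat.chat_completion_message import ChatCompletionMessage

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.providers.openai import OpenAIProvider

from tests.models.mock_openai import MockOpenAI, completion_message, get_mock_chat_completion_kwargs


async def test_text_response() -> None:
    # Canned chat completion the mock client returns instead of calling the API.
    completion = completion_message(ChatCompletionMessage(content='world', role='assistant'))
    mock_client = MockOpenAI.create_mock(completion)

    model = OpenAIModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client))
    agent = Agent(model)

    result = await agent.run('hello')
    assert result.output == 'world'
    # The mock records every kwargs dict passed to chat.completions.create.
    assert get_mock_chat_completion_kwargs(mock_client)[0]['model'] == 'gpt-4o'
```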
@@ -205,7 +205,7 @@ async def test_sync_request_text_response(allow_model_requests: None):
|
|
|
205
205
|
model_name='claude-3-5-haiku-123',
|
|
206
206
|
timestamp=IsNow(tz=timezone.utc),
|
|
207
207
|
provider_name='anthropic',
|
|
208
|
-
|
|
208
|
+
provider_response_id='123',
|
|
209
209
|
),
|
|
210
210
|
ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
|
|
211
211
|
ModelResponse(
|
|
@@ -214,7 +214,7 @@ async def test_sync_request_text_response(allow_model_requests: None):
|
|
|
214
214
|
model_name='claude-3-5-haiku-123',
|
|
215
215
|
timestamp=IsNow(tz=timezone.utc),
|
|
216
216
|
provider_name='anthropic',
|
|
217
|
-
|
|
217
|
+
provider_response_id='123',
|
|
218
218
|
),
|
|
219
219
|
]
|
|
220
220
|
)
|
|
@@ -303,7 +303,7 @@ async def test_request_structured_response(allow_model_requests: None):
|
|
|
303
303
|
model_name='claude-3-5-haiku-123',
|
|
304
304
|
timestamp=IsNow(tz=timezone.utc),
|
|
305
305
|
provider_name='anthropic',
|
|
306
|
-
|
|
306
|
+
provider_response_id='123',
|
|
307
307
|
),
|
|
308
308
|
ModelRequest(
|
|
309
309
|
parts=[
|
|
@@ -368,7 +368,7 @@ async def test_request_tool_call(allow_model_requests: None):
|
|
|
368
368
|
model_name='claude-3-5-haiku-123',
|
|
369
369
|
timestamp=IsNow(tz=timezone.utc),
|
|
370
370
|
provider_name='anthropic',
|
|
371
|
-
|
|
371
|
+
provider_response_id='123',
|
|
372
372
|
),
|
|
373
373
|
ModelRequest(
|
|
374
374
|
parts=[
|
|
@@ -392,7 +392,7 @@ async def test_request_tool_call(allow_model_requests: None):
|
|
|
392
392
|
model_name='claude-3-5-haiku-123',
|
|
393
393
|
timestamp=IsNow(tz=timezone.utc),
|
|
394
394
|
provider_name='anthropic',
|
|
395
|
-
|
|
395
|
+
provider_response_id='123',
|
|
396
396
|
),
|
|
397
397
|
ModelRequest(
|
|
398
398
|
parts=[
|
|
@@ -410,7 +410,7 @@ async def test_request_tool_call(allow_model_requests: None):
|
|
|
410
410
|
model_name='claude-3-5-haiku-123',
|
|
411
411
|
timestamp=IsNow(tz=timezone.utc),
|
|
412
412
|
provider_name='anthropic',
|
|
413
|
-
|
|
413
|
+
provider_response_id='123',
|
|
414
414
|
),
|
|
415
415
|
]
|
|
416
416
|
)
|
|
@@ -652,7 +652,7 @@ async def test_stream_structured(allow_model_requests: None):

    async with agent.run_stream('') as result:
        assert not result.is_complete
-        chunks = [c async for c in result.
+        chunks = [c async for c in result.stream_output(debounce_by=None)]

        # The tool output doesn't echo any content to the stream, so we only get the final payload once when
        # the block starts and once when it ends.
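The hunk above switches the streamed-output iterator to stream_output; the Gemini hunks later in this diff make the same change. A sketch of the call pattern, assuming an already-configured agent (the helper function itself is hypothetical, not from the tests):

    async def collect_chunks(agent):
        async with agent.run_stream('Hello') as result:
            # debounce_by=None disables debouncing so every streamed chunk is yielded individually
            return [chunk async for chunk in result.stream_output(debounce_by=None)]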
@@ -757,7 +757,7 @@ async def test_image_as_binary_content_tool_response(
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01Kwjzggomz7bv9og51qGFuH',
            ),
            ModelRequest(
                parts=[
@@ -795,7 +795,7 @@ async def test_image_as_binary_content_tool_response(
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_015btMBYLTuDnMP7zAeuHQGi',
            ),
        ]
    )
@@ -917,7 +917,7 @@ async def test_anthropic_model_instructions(allow_model_requests: None, anthropi
                model_name='claude-3-opus-20240229',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01Fg1JVgvCYUHWsxrj9GkpEv',
            ),
        ]
    )
@@ -965,7 +965,7 @@ I'll provide this information in a clear, helpful way, emphasizing safety withou
                model_name='claude-3-7-sonnet-20250219',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01BnZvs3naGorn93wjjCDwbd',
            ),
        ]
    )
@@ -992,7 +992,7 @@ I'll provide this information in a clear, helpful way, emphasizing safety withou
                model_name='claude-3-7-sonnet-20250219',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1035,7 +1035,7 @@ I'll keep the format similar to my street-crossing response for consistency.\
                model_name='claude-3-7-sonnet-20250219',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id=IsStr(),
            ),
        ]
    )
@@ -1465,7 +1465,7 @@ Several major events are happening today, including:
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01W2YfD2EF8BbAqLRr8ftH4W',
            ),
        ]
    )
@@ -1521,7 +1521,7 @@ print(f"3 * 12390 = {result}")\
                model_name='claude-sonnet-4-20250514',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01RJnbK7VMxvS2SyvtyJAQVU',
            ),
        ]
    )
@@ -1571,7 +1571,7 @@ It's being celebrated as:
                model_name='gpt-4.1-2025-04-14',
                timestamp=IsDatetime(),
                provider_name='openai',
-
+                provider_response_id='resp_689dc4abe31c81968ed493d15d8810fe0afe80ec3d42722e',
            ),
        ]
    )
@@ -1687,7 +1687,7 @@ async def test_anthropic_tool_output(allow_model_requests: None, anthropic_api_k
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_012TXW181edhmR5JCsQRsBKx',
            ),
            ModelRequest(
                parts=[
@@ -1720,7 +1720,7 @@ async def test_anthropic_tool_output(allow_model_requests: None, anthropic_api_k
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01K4Fzcf1bhiyLzHpwLdrefj',
            ),
            ModelRequest(
                parts=[
@@ -1785,7 +1785,7 @@ async def test_anthropic_text_output_function(allow_model_requests: None, anthro
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01MsqUB7ZyhjGkvepS1tCXp3',
            ),
            ModelRequest(
                parts=[
@@ -1816,7 +1816,7 @@ async def test_anthropic_text_output_function(allow_model_requests: None, anthro
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_0142umg4diSckrDtV9vAmmPL',
            ),
        ]
    )
@@ -1874,7 +1874,7 @@ Don't include any text or Markdown fencing before or after.\
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_018YiNXULHGpoKoHkTt6GivG',
            ),
            ModelRequest(
                parts=[
@@ -1908,7 +1908,7 @@ Don't include any text or Markdown fencing before or after.\
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01WiRVmLhCrJbJZRqmAWKv3X',
            ),
        ]
    )
@@ -1966,7 +1966,7 @@ Don't include any text or Markdown fencing before or after.\
                model_name='claude-3-5-sonnet-20241022',
                timestamp=IsDatetime(),
                provider_name='anthropic',
-
+                provider_response_id='msg_01N2PwwVQo2aBtt6UFhMDtEX',
            ),
        ]
    )
@@ -443,7 +443,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key
                model_name='o3-mini-2025-01-31',
                timestamp=IsDatetime(),
                provider_name='openai',
-
+                provider_response_id='resp_680739f4ad748191bd11096967c37c8b048efc3f8b2a068e',
            ),
        ]
    )
@@ -468,7 +468,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key
                model_name='o3-mini-2025-01-31',
                timestamp=IsDatetime(),
                provider_name='openai',
-
+                provider_response_id='resp_680739f4ad748191bd11096967c37c8b048efc3f8b2a068e',
            ),
            ModelRequest(
                parts=[
@@ -56,7 +56,7 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek
                model_name='deepseek-reasoner',
                timestamp=IsDatetime(),
                provider_name='deepseek',
-
+                provider_response_id='181d9669-2b3a-445e-bd13-2ebff2c378f6',
            ),
        ]
    )
@@ -171,7 +171,7 @@ async def test_first_failed_instrumented_stream(capfire: CaptureLogfire) -> None
    fallback_model = FallbackModel(failure_model_stream, success_model_stream)
    agent = Agent(model=fallback_model, instrument=True)
    async with agent.run_stream('input') as result:
-        assert [c async for c, _is_last in result.
+        assert [c async for c, _is_last in result.stream_responses(debounce_by=None)] == snapshot(
            [
                ModelResponse(
                    parts=[TextPart(content='hello ')],
@@ -356,7 +356,7 @@ async def test_first_success_streaming() -> None:
    fallback_model = FallbackModel(success_model_stream, failure_model_stream)
    agent = Agent(model=fallback_model)
    async with agent.run_stream('input') as result:
-        assert [c async for c, _is_last in result.
+        assert [c async for c, _is_last in result.stream_responses(debounce_by=None)] == snapshot(
            [
                ModelResponse(
                    parts=[TextPart(content='hello ')],
@@ -385,7 +385,7 @@ async def test_first_failed_streaming() -> None:
    fallback_model = FallbackModel(failure_model_stream, success_model_stream)
    agent = Agent(model=fallback_model)
    async with agent.run_stream('input') as result:
-        assert [c async for c, _is_last in result.
+        assert [c async for c, _is_last in result.stream_responses(debounce_by=None)] == snapshot(
            [
                ModelResponse(
                    parts=[TextPart(content='hello ')],
@@ -415,7 +415,7 @@ async def test_all_failed_streaming() -> None:
    agent = Agent(model=fallback_model)
    with pytest.raises(ExceptionGroup) as exc_info:
        async with agent.run_stream('hello') as result:
-            [c async for c, _is_last in result.
+            [c async for c, _is_last in result.stream_responses(debounce_by=None)]  # pragma: lax no cover
    assert 'All models from FallbackModel failed' in exc_info.value.args[0]
    exceptions = exc_info.value.exceptions
    assert len(exceptions) == 2
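In the fallback tests the response-level iterator becomes stream_responses which, as the unpacking in the hunks above shows, yields (response, is_last) pairs. A sketch of that pattern with a hypothetical, already-configured agent:

    async def collect_responses(agent):
        async with agent.run_stream('input') as result:
            # stream_responses yields (ModelResponse, is_last) pairs; the flag is ignored here
            return [response async for response, _is_last in result.stream_responses(debounce_by=None)]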
@@ -814,7 +814,7 @@ async def test_stream_text(get_gemini_client: GetGeminiClient):
    agent = Agent(m)

    async with agent.run_stream('Hello') as result:
-        chunks = [chunk async for chunk in result.
+        chunks = [chunk async for chunk in result.stream_output(debounce_by=None)]
        assert chunks == snapshot(
            [
                'Hello ',
@@ -859,7 +859,7 @@ async def test_stream_invalid_unicode_text(get_gemini_client: GetGeminiClient):
    agent = Agent(m)

    async with agent.run_stream('Hello') as result:
-        chunks = [chunk async for chunk in result.
+        chunks = [chunk async for chunk in result.stream_output(debounce_by=None)]
        assert chunks == snapshot(['abc', 'abc€def', 'abc€def'])
        assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2))

@@ -889,7 +889,7 @@ async def test_stream_structured(get_gemini_client: GetGeminiClient):
    agent = Agent(model, output_type=tuple[int, int])

    async with agent.run_stream('Hello') as result:
-        chunks = [chunk async for chunk in result.
+        chunks = [chunk async for chunk in result.stream_output(debounce_by=None)]
        assert chunks == snapshot([(1, 2), (1, 2)])
        assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2))

@@ -1491,7 +1491,7 @@ Always be cautious—even if you have the right-of-way—and understand that it'
                model_name='o3-mini-2025-01-31',
                timestamp=IsDatetime(),
                provider_name='openai',
-
+                provider_response_id='resp_680393ff82488191a7d0850bf0dd99a004f0817ea037a07b',
            ),
        ]
    )
@@ -1518,7 +1518,7 @@ Always be cautious—even if you have the right-of-way—and understand that it'
                model_name='o3-mini-2025-01-31',
                timestamp=IsDatetime(),
                provider_name='openai',
-
+                provider_response_id='resp_680393ff82488191a7d0850bf0dd99a004f0817ea037a07b',
            ),
            ModelRequest(
                parts=[
@@ -1688,7 +1688,7 @@ async def test_gemini_tool_config_any_with_tool_without_args(allow_model_request
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1714,7 +1714,7 @@ async def test_gemini_tool_config_any_with_tool_without_args(allow_model_request
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1765,7 +1765,7 @@ async def test_gemini_tool_output(allow_model_requests: None, gemini_api_key: st
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1791,7 +1791,7 @@ async def test_gemini_tool_output(allow_model_requests: None, gemini_api_key: st
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1849,7 +1849,7 @@ It's the capital of Mexico and one of the largest metropolitan areas in the worl
                model_name='models/gemini-2.5-pro-preview-05-06',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id='TT9IaNfGN_DmqtsPzKnE4AE',
            ),
        ]
    )
@@ -1915,7 +1915,7 @@ async def test_gemini_native_output(allow_model_requests: None, gemini_api_key:
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
        ]
    )
@@ -1970,7 +1970,7 @@ async def test_gemini_native_output_multiple(allow_model_requests: None, gemini_
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
        ]
    )
@@ -2018,7 +2018,7 @@ Don't include any text or Markdown fencing before or after.\
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
        ]
    )
@@ -2068,7 +2068,7 @@ Don't include any text or Markdown fencing before or after.\
                model_name='models/gemini-2.5-pro-preview-05-06',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2095,7 +2095,7 @@ Don't include any text or Markdown fencing before or after.\
                model_name='models/gemini-2.5-pro-preview-05-06',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
        ]
    )
@@ -2149,7 +2149,7 @@ Don't include any text or Markdown fencing before or after.\
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
        ]
    )
@@ -149,7 +149,7 @@ async def test_url_input(
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
        ]
    )
@@ -186,7 +186,7 @@ async def test_url_input_force_download(allow_model_requests: None) -> None:  #
                model_name='gemini-2.0-flash',
                timestamp=IsDatetime(),
                provider_details={'finish_reason': 'STOP'},
-
+                provider_response_id=IsStr(),
            ),
        ]
    )