pydantic-ai 0.1.0__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/PKG-INFO +3 -3
- pydantic_ai-0.1.1/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_false.yaml +76 -0
- pydantic_ai-0.1.1/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_true.yaml +73 -0
- pydantic_ai-0.1.1/tests/models/cassettes/test_openai/test_openai_instructions_with_tool_calls_keep_instructions.yaml +207 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_gemini.py +37 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_openai.py +38 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_tools.py +7 -1
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/.gitignore +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/LICENSE +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/Makefile +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/README.md +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/pyproject.toml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/__init__.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/assets/dummy.pdf +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/assets/kiwi.png +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/assets/marcelo.mp3 +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/assets/small_video.mp4 +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/cassettes/test_settings/test_stop_settings[anthropic].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/cassettes/test_settings/test_stop_settings[bedrock].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/cassettes/test_settings/test_stop_settings[cohere].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/cassettes/test_settings/test_stop_settings[gemini].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/cassettes/test_settings/test_stop_settings[groq].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/cassettes/test_settings/test_stop_settings[mistral].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/cassettes/test_settings/test_stop_settings[openai].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/conftest.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/__init__.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_dataset.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_evaluator_base.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_evaluator_common.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_evaluator_context.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_evaluator_spec.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_evaluators.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_llm_as_a_judge.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_otel.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_render_numbers.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_reporting.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_reports.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/test_utils.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/evals/utils.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/example_modules/README.md +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/example_modules/bank_database.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/example_modules/fake_database.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/example_modules/weather_service.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/graph/__init__.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/graph/test_file_persistence.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/graph/test_graph.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/graph/test_mermaid.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/graph/test_persistence.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/graph/test_state.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/graph/test_utils.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/import_examples.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/json_body_serializer.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/mcp_server.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/__init__.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_anthropic/test_anthropic_model_instructions.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_anthropic/test_document_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_empty_system_prompt.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_guardrail_config.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_instructions.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_other_parameters.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_performance_config.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_video_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_bedrock/test_video_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_cohere/test_cohere_model_instructions.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_cohere/test_request_simple_success_with_vcr.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_gemini/test_gemini_drop_exclusive_maximum.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_gemini/test_gemini_exclusive_minimum_and_maximum.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_gemini/test_gemini_model_instructions.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_groq/test_groq_model_instructions.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_mistral/test_mistral_model_instructions.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4.5-preview].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4o-mini].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_max_completion_tokens[o3-mini].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_multiple_agent_tool_calls.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_openai_instructions.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_openai_model_without_system_prompt.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai/test_user_id.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_audio_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_image_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_builtin_tools.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_http_error.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_instructions.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_retry.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response_with_tool_call.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_output_type.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_effort.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_generate_summary.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_stream.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_system_prompt.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/cassettes/test_openai_responses/test_openai_responses_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/mock_async_stream.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_anthropic.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_bedrock.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_cohere.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_fallback.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_groq.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_instrumented.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_mistral.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_model.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_model_function.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_model_names.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_model_test.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_openai_responses.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/__init__.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/cassettes/test_azure/test_azure_provider_call.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/cassettes/test_google_vertex/test_vertexai_provider.yaml +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_anthropic.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_azure.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_bedrock.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_cohere.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_deepseek.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_google_gla.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_google_vertex.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_groq.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_mistral.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_openai.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/providers/test_provider_names.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_agent.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_cli.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_deps.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_examples.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_format_as_xml.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_json_body_serializer.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_live.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_logfire.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_mcp.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_messages.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_parts_manager.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_settings.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_streaming.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_usage_limits.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_utils.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/typed_agent.py +0 -0
- {pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/typed_graph.py +0 -0
{pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.1.0
+Version: 0.1.1
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.1.0
+Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.1.1
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.1.0; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.1.1; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.11.0; extra == 'logfire'
 Description-Content-Type: text/markdown
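The only metadata changes are the version bump and the matching dependency pins above: the pydantic-ai metapackage pins pydantic-ai-slim (and, via the 'examples' extra, pydantic-ai-examples) to exactly its own version. A minimal sketch, not part of the diff, of how one might confirm the installed distributions stay in lockstep after upgrading:

# Hypothetical post-upgrade check; assumes both distributions are installed.
from importlib.metadata import version

assert version('pydantic-ai') == version('pydantic-ai-slim') == '0.1.1'
# pydantic-ai-examples is pinned the same way, but only present when the
# 'examples' extra was installed.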
pydantic_ai-0.1.1/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_false.yaml
ADDED
@@ -0,0 +1,76 @@
+interactions:
+- request:
+    headers:
+      accept:
+      - '*/*'
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '296'
+      content-type:
+      - application/json
+      host:
+      - generativelanguage.googleapis.com
+    method: POST
+    parsed_body:
+      contents:
+      - parts:
+        - text: What is the temperature in Tokyo?
+        role: user
+      tools:
+        function_declarations:
+        - description: null
+          name: get_temperature
+          parameters:
+            properties:
+              city:
+                type: string
+              country:
+                type: string
+            required:
+            - city
+            - country
+            type: object
+    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent
+  response:
+    headers:
+      alt-svc:
+      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
+      content-length:
+      - '748'
+      content-type:
+      - application/json; charset=UTF-8
+      server-timing:
+      - gfet4t7; dur=523
+      transfer-encoding:
+      - chunked
+      vary:
+      - Origin
+      - X-Origin
+      - Referer
+    parsed_body:
+      candidates:
+      - avgLogprobs: -0.12538558465463143
+        content:
+          parts:
+          - text: |
+              The available tools lack the ability to access real-time information, including current temperature. Therefore, I cannot answer your question.
+          role: model
+        finishReason: STOP
+      modelVersion: gemini-1.5-flash
+      usageMetadata:
+        candidatesTokenCount: 27
+        candidatesTokensDetails:
+        - modality: TEXT
+          tokenCount: 27
+        promptTokenCount: 14
+        promptTokensDetails:
+        - modality: TEXT
+          tokenCount: 14
+        totalTokenCount: 41
+    status:
+      code: 200
+      message: OK
+version: 1
pydantic_ai-0.1.1/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_true.yaml
ADDED
@@ -0,0 +1,73 @@
+interactions:
+- request:
+    headers:
+      accept:
+      - '*/*'
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '264'
+      content-type:
+      - application/json
+      host:
+      - generativelanguage.googleapis.com
+    method: POST
+    parsed_body:
+      contents:
+      - parts:
+        - text: What is the temperature in Tokyo?
+        role: user
+      tools:
+        function_declarations:
+        - description: ''
+          name: get_temperature
+          parameters:
+            properties:
+              location:
+                type: object
+            required:
+            - location
+            type: object
+    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent
+  response:
+    headers:
+      alt-svc:
+      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
+      content-length:
+      - '741'
+      content-type:
+      - application/json; charset=UTF-8
+      server-timing:
+      - gfet4t7; dur=534
+      transfer-encoding:
+      - chunked
+      vary:
+      - Origin
+      - X-Origin
+      - Referer
+    parsed_body:
+      candidates:
+      - avgLogprobs: -0.15060695580073766
+        content:
+          parts:
+          - text: |
+              I need a location dictionary to use the `get_temperature` function. I cannot provide the temperature in Tokyo without more information.
+          role: model
+        finishReason: STOP
+      modelVersion: gemini-1.5-flash
+      usageMetadata:
+        candidatesTokenCount: 28
+        candidatesTokensDetails:
+        - modality: TEXT
+          tokenCount: 28
+        promptTokenCount: 12
+        promptTokensDetails:
+        - modality: TEXT
+          tokenCount: 12
+        totalTokenCount: 40
+    status:
+      code: 200
+      message: OK
+version: 1
pydantic_ai-0.1.1/tests/models/cassettes/test_openai/test_openai_instructions_with_tool_calls_keep_instructions.yaml
ADDED
@@ -0,0 +1,207 @@
+interactions:
+- request:
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '419'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+    method: POST
+    parsed_body:
+      messages:
+      - content: You are a helpful assistant.
+        role: system
+      - content: What is the temperature in Tokyo?
+        role: user
+      model: gpt-4.1-mini
+      n: 1
+      stream: false
+      tool_choice: auto
+      tools:
+      - function:
+          description: ''
+          name: get_temperature
+          parameters:
+            additionalProperties: false
+            properties:
+              city:
+                type: string
+            required:
+            - city
+            type: object
+          strict: true
+        type: function
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    headers:
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      connection:
+      - keep-alive
+      content-length:
+      - '1089'
+      content-type:
+      - application/json
+      openai-organization:
+      - pydantic-28gund
+      openai-processing-ms:
+      - '490'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      transfer-encoding:
+      - chunked
+    parsed_body:
+      choices:
+      - finish_reason: tool_calls
+        index: 0
+        logprobs: null
+        message:
+          annotations: []
+          content: null
+          refusal: null
+          role: assistant
+          tool_calls:
+          - function:
+              arguments: '{"city":"Tokyo"}'
+              name: get_temperature
+            id: call_bhZkmIKKItNGJ41whHUHB7p9
+            type: function
+      created: 1744810634
+      id: chatcmpl-BMxEwRA0p0gJ52oKS7806KAlfMhqq
+      model: gpt-4.1-mini-2025-04-14
+      object: chat.completion
+      service_tier: default
+      system_fingerprint: fp_38647f5e19
+      usage:
+        completion_tokens: 15
+        completion_tokens_details:
+          accepted_prediction_tokens: 0
+          audio_tokens: 0
+          reasoning_tokens: 0
+          rejected_prediction_tokens: 0
+        prompt_tokens: 50
+        prompt_tokens_details:
+          audio_tokens: 0
+          cached_tokens: 0
+        total_tokens: 65
+    status:
+      code: 200
+      message: OK
+- request:
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '665'
+      content-type:
+      - application/json
+      cookie:
+      - __cf_bm=x.H2GlMeh.t_Q.gVlCXrh3.ggn9lKjhmUeG_ToNThLs-1744810635-1.0.1.1-tiHwqGvBw3eEy_y9_q5nx7B.7YCbLb9cXdDj6DklLmtFllOFe708mKwYvGd8fY2y5bO2NOagULipA7MxfwW9P0hlnRSiJZbZBO9tjrUweFc;
+        _cfuvid=VlHcJdsIsxGEt2lddKu_5Am_lfyYndl9JB2Ezy.aygo-1744810635187-0.0.1.1-604800000
+      host:
+      - api.openai.com
+    method: POST
+    parsed_body:
+      messages:
+      - content: You are a helpful assistant.
+        role: system
+      - content: What is the temperature in Tokyo?
+        role: user
+      - role: assistant
+        tool_calls:
+        - function:
+            arguments: '{"city":"Tokyo"}'
+            name: get_temperature
+          id: call_bhZkmIKKItNGJ41whHUHB7p9
+          type: function
+      - content: '20.0'
+        role: tool
+        tool_call_id: call_bhZkmIKKItNGJ41whHUHB7p9
+      model: gpt-4.1-mini
+      n: 1
+      stream: false
+      tool_choice: auto
+      tools:
+      - function:
+          description: ''
+          name: get_temperature
+          parameters:
+            additionalProperties: false
+            properties:
+              city:
+                type: string
+            required:
+            - city
+            type: object
+          strict: true
+        type: function
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    headers:
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      connection:
+      - keep-alive
+      content-length:
+      - '867'
+      content-type:
+      - application/json
+      openai-organization:
+      - pydantic-28gund
+      openai-processing-ms:
+      - '949'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      transfer-encoding:
+      - chunked
+    parsed_body:
+      choices:
+      - finish_reason: stop
+        index: 0
+        logprobs: null
+        message:
+          annotations: []
+          content: The temperature in Tokyo is currently 20.0 degrees Celsius.
+          refusal: null
+          role: assistant
+      created: 1744810635
+      id: chatcmpl-BMxEx6B8JEj6oDC45MOWKp0phg8UP
+      model: gpt-4.1-mini-2025-04-14
+      object: chat.completion
+      service_tier: default
+      system_fingerprint: fp_38647f5e19
+      usage:
+        completion_tokens: 15
+        completion_tokens_details:
+          accepted_prediction_tokens: 0
+          audio_tokens: 0
+          reasoning_tokens: 0
+          rejected_prediction_tokens: 0
+        prompt_tokens: 75
+        prompt_tokens_details:
+          audio_tokens: 0
+          cached_tokens: 0
+        total_tokens: 90
+    status:
+      code: 200
+      message: OK
+version: 1
{pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_gemini.py
@@ -1029,3 +1029,40 @@ async def test_gemini_model_instructions(allow_model_requests: None, gemini_api_key: str):
             ),
         ]
     )
+
+
+class CurrentLocation(BaseModel, extra='forbid'):
+    city: str
+    country: str
+
+
+@pytest.mark.vcr()
+async def test_gemini_additional_properties_is_false(allow_model_requests: None, gemini_api_key: str):
+    m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key=gemini_api_key))
+    agent = Agent(m)
+
+    @agent.tool_plain
+    async def get_temperature(location: CurrentLocation) -> float:  # pragma: no cover
+        return 20.0
+
+    result = await agent.run('What is the temperature in Tokyo?')
+    assert result.output == snapshot(
+        'The available tools lack the ability to access real-time information, including current temperature. Therefore, I cannot answer your question.\n'
+    )
+
+
+@pytest.mark.vcr()
+async def test_gemini_additional_properties_is_true(allow_model_requests: None, gemini_api_key: str):
+    m = GeminiModel('gemini-1.5-flash', provider=GoogleGLAProvider(api_key=gemini_api_key))
+    agent = Agent(m)
+
+    with pytest.warns(UserWarning, match='.*additionalProperties.*'):
+
+        @agent.tool_plain
+        async def get_temperature(location: dict[str, CurrentLocation]) -> float:  # pragma: no cover
+            return 20.0
+
+    result = await agent.run('What is the temperature in Tokyo?')
+    assert result.output == snapshot(
+        'I need a location dictionary to use the `get_temperature` function. I cannot provide the temperature in Tokyo without more information.\n'
+    )
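For context on the two new Gemini tests above, a minimal sketch, not part of the diff, of the JSON schema Pydantic v2 generates for a model declared with extra='forbid'. It emits an additionalProperties: false entry, a key that does not appear in the function_declarations recorded in the cassette above, while the dict-valued parameter in the second test puts an additionalProperties entry in the schema that cannot be expressed in a Gemini function declaration, hence the expected warning.

# Sketch only: inspect the schema Pydantic produces for the tool parameter model.
from pydantic import BaseModel


class CurrentLocation(BaseModel, extra='forbid'):
    city: str
    country: str


schema = CurrentLocation.model_json_schema()
assert schema['additionalProperties'] is False
assert sorted(schema['required']) == ['city', 'country']
# The cassette above shows the same properties/required/type structure,
# minus the additionalProperties key, in the request sent to Gemini.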
{pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/models/test_openai.py
@@ -1215,3 +1215,41 @@ async def test_openai_model_without_system_prompt(allow_model_requests: None, openai_api_key: str):
     assert result.output == snapshot(
         "That's right—I am a potato! A spud of many talents, here to help you out. How can this humble potato be of service today?"
     )
+
+
+@pytest.mark.vcr()
+async def test_openai_instructions_with_tool_calls_keep_instructions(allow_model_requests: None, openai_api_key: str):
+    m = OpenAIModel('gpt-4.1-mini', provider=OpenAIProvider(api_key=openai_api_key))
+    agent = Agent(m, instructions='You are a helpful assistant.')
+
+    @agent.tool_plain
+    async def get_temperature(city: str) -> float:
+        return 20.0
+
+    result = await agent.run('What is the temperature in Tokyo?')
+    assert result.all_messages() == snapshot(
+        [
+            ModelRequest(
+                parts=[UserPromptPart(content='What is the temperature in Tokyo?', timestamp=IsDatetime())],
+                instructions='You are a helpful assistant.',
+            ),
+            ModelResponse(
+                parts=[ToolCallPart(tool_name='get_temperature', args='{"city":"Tokyo"}', tool_call_id=IsStr())],
+                model_name='gpt-4.1-mini-2025-04-14',
+                timestamp=IsDatetime(),
+            ),
+            ModelRequest(
+                parts=[
+                    ToolReturnPart(
+                        tool_name='get_temperature', content=20.0, tool_call_id=IsStr(), timestamp=IsDatetime()
+                    )
+                ],
+                instructions='You are a helpful assistant.',
+            ),
+            ModelResponse(
+                parts=[TextPart(content='The temperature in Tokyo is currently 20.0 degrees Celsius.')],
+                model_name='gpt-4.1-mini-2025-04-14',
+                timestamp=IsDatetime(),
+            ),
+        ]
+    )
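The cassette earlier in this diff records both OpenAI requests for this test; the second request, sent after the tool call, still carries the 'You are a helpful assistant.' content, which is the behaviour the test name refers to. A minimal runnable sketch, not part of the diff and assuming pydantic-ai 0.1.1, of the same property checked locally with TestModel so no API key is needed: the instructions stay attached to every ModelRequest, including the one that follows the tool call.

# Sketch only; TestModel fakes the LLM, so no network access is required.
import asyncio

from pydantic_ai import Agent
from pydantic_ai.messages import ModelRequest
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel(), instructions='You are a helpful assistant.')


@agent.tool_plain
async def get_temperature(city: str) -> float:
    return 20.0


async def main() -> None:
    result = await agent.run('What is the temperature in Tokyo?')
    # Both the initial request and the tool-return follow-up carry the instructions.
    for message in result.all_messages():
        if isinstance(message, ModelRequest):
            assert message.instructions == 'You are a helpful assistant.'


asyncio.run(main())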
{pydantic_ai-0.1.0 → pydantic_ai-0.1.1}/tests/test_tools.py
@@ -829,6 +829,7 @@ def test_call_tool_without_unrequired_parameters(set_event_loop: None):
                 ToolCallPart(tool_name='my_tool', args={'a': 13, 'b': 4}),
                 ToolCallPart(tool_name='my_tool_plain', args={'b': 17}),
                 ToolCallPart(tool_name='my_tool_plain', args={'a': 4, 'b': 17}),
+                ToolCallPart(tool_name='no_args_tool', args=''),
             ]
         )
     else:
@@ -836,6 +837,10 @@ def test_call_tool_without_unrequired_parameters(set_event_loop: None):
 
     agent = Agent(FunctionModel(call_tools_first))
 
+    @agent.tool_plain
+    def no_args_tool() -> None:
+        return None
+
     @agent.tool
     def my_tool(ctx: RunContext[None], a: int, b: int = 2) -> int:
         return a + b
@@ -858,9 +863,10 @@ def test_call_tool_without_unrequired_parameters(set_event_loop: None):
             {'a': 13, 'b': 4},
             {'b': 17},
             {'a': 4, 'b': 17},
+            '',
         ]
     )
-    assert tool_returns == snapshot([15, 17, 51, 68])
+    assert tool_returns == snapshot([15, 17, 51, 68, None])
 
 
 def test_schema_generator():
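The test_tools.py change above pins down how a tool that takes no parameters is handled: the model issues a ToolCallPart with args='' and the tool's None return is recorded alongside the other tool results. A minimal sketch, not part of the diff, of registering such a tool and exercising it with TestModel:

# Sketch only; TestModel calls every registered tool, including one with no parameters.
import asyncio

from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel())


@agent.tool_plain
def no_args_tool() -> None:
    return None


async def main() -> None:
    result = await agent.run('call the tool')
    print(result.output)  # TestModel's final response summarises the tool results it gathered


asyncio.run(main())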