pydantic-ai 0.2.1__tar.gz → 0.2.3__tar.gz
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/PKG-INFO +3 -3
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/pyproject.toml +9 -1
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_evaluator_common.py +59 -2
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_llm_as_a_judge.py +59 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/json_body_serializer.py +6 -6
- pydantic_ai-0.2.3/tests/providers/__init__.py +0 -0
- pydantic_ai-0.2.3/tests/test_a2a.py +352 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_cli.py +98 -0
- pydantic_ai-0.2.3/tests/test_direct.py +117 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_examples.py +5 -0
- pydantic_ai-0.2.3/tests/test_messages.py +255 -0
- pydantic_ai-0.2.1/tests/test_messages.py +0 -95
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/.gitignore +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/LICENSE +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/Makefile +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/README.md +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/__init__.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/assets/dummy.pdf +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/assets/kiwi.png +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/assets/marcelo.mp3 +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/assets/small_video.mp4 +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_agent_with_stdio_server.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_tool_returning_dict.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_tool_returning_error.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_tool_returning_image.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_tool_returning_image_resource.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_tool_returning_multiple_items.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_tool_returning_none.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_tool_returning_str.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_mcp/test_tool_returning_text_resource.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_settings/test_stop_settings[anthropic].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_settings/test_stop_settings[bedrock].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_settings/test_stop_settings[cohere].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_settings/test_stop_settings[gemini].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_settings/test_stop_settings[groq].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_settings/test_stop_settings[mistral].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/cassettes/test_settings/test_stop_settings[openai].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/conftest.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/__init__.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_dataset.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_evaluator_base.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_evaluator_context.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_evaluator_spec.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_evaluators.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_otel.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_render_numbers.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_reporting.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_reports.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_utils.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/utils.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/example_modules/README.md +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/example_modules/bank_database.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/example_modules/fake_database.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/example_modules/weather_service.py +0 -0
- {pydantic_ai-0.2.1/tests/graph → pydantic_ai-0.2.3/tests/fasta2a}/__init__.py +0 -0
- {pydantic_ai-0.2.1/tests/models → pydantic_ai-0.2.3/tests/graph}/__init__.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/graph/test_file_persistence.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/graph/test_graph.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/graph/test_mermaid.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/graph/test_persistence.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/graph/test_state.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/graph/test_utils.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/import_examples.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/mcp_server.py +0 -0
- {pydantic_ai-0.2.1/tests/providers → pydantic_ai-0.2.3/tests/models}/__init__.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_anthropic_model_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_document_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_extra_headers.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_image_url_input_invalid_mime_type.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_multiple_parallel_tool_calls.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_anthropic/test_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_empty_system_prompt.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_anthropic_model_without_tools.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_guardrail_config.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_iter_stream.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_max_tokens.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_other_parameters.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_performance_config.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_retry.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_stream.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_structured_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_model_top_p.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_bedrock_multiple_documents_in_history.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_text_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_video_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_bedrock/test_video_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_cohere/test_cohere_model_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_cohere/test_request_simple_success_with_vcr.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_false.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_gemini_additional_properties_is_true.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_gemini_drop_exclusive_maximum.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_gemini_exclusive_minimum_and_maximum.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_gemini_model_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_video_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_gemini/test_video_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_groq/test_extra_headers.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_groq/test_groq_model_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_groq/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_groq/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_groq/test_image_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_mistral/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_mistral/test_mistral_model_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_audio_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_document_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_document_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_extra_headers.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_image_url_tool_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4.5-preview].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_max_completion_tokens[gpt-4o-mini].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_max_completion_tokens[o3-mini].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_multiple_agent_tool_calls.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_openai_audio_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_openai_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_openai_instructions_with_tool_calls_keep_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_openai_model_without_system_prompt.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[developer].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_openai_o1_mini_system_role[system].yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai/test_user_id.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_audio_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_image_as_binary_content_tool_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_as_binary_content_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_document_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_image_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_builtin_tools.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_http_error.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_instructions.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_retry.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_model_simple_response_with_tool_call.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_output_type.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_effort.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_reasoning_generate_summary.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_stream.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_system_prompt.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/cassettes/test_openai_responses/test_openai_responses_text_document_url_input.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/mock_async_stream.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_anthropic.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_bedrock.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_cohere.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_fallback.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_gemini.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_groq.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_instrumented.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_mistral.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_model.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_model_function.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_model_names.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_model_request_parameters.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_model_test.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_openai.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/models/test_openai_responses.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/cassettes/test_azure/test_azure_provider_call.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/cassettes/test_google_vertex/test_vertexai_provider.yaml +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_anthropic.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_azure.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_bedrock.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_cohere.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_deepseek.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_google_gla.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_google_vertex.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_groq.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_mistral.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_openai.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/providers/test_provider_names.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_agent.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_deps.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_format_as_xml.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_json_body_serializer.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_live.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_logfire.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_mcp.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_parts_manager.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_settings.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_streaming.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_tools.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_usage_limits.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/test_utils.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/typed_agent.py +0 -0
- {pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/typed_graph.py +0 -0
{pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.2.1
+Version: 0.2.3
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -28,9 +28,9 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.2.1
+Requires-Dist: pydantic-ai-slim[a2a,anthropic,bedrock,cli,cohere,evals,groq,mcp,mistral,openai,vertexai]==0.2.3
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.2.1; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.2.3; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.11.0; extra == 'logfire'
 Description-Content-Type: text/markdown
{pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/pyproject.toml

@@ -46,7 +46,7 @@ requires-python = ">=3.9"
 
 [tool.hatch.metadata.hooks.uv-dynamic-versioning]
 dependencies = [
-    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli,mcp,evals]=={{ version }}",
+    "pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere,bedrock,cli,mcp,evals,a2a]=={{ version }}",
 ]
 
 [tool.hatch.metadata.hooks.uv-dynamic-versioning.optional-dependencies]
@@ -67,6 +67,7 @@ pydantic-ai-slim = { workspace = true }
 pydantic-evals = { workspace = true }
 pydantic-graph = { workspace = true }
 pydantic-ai-examples = { workspace = true }
+fasta2a = { workspace = true }
 
 [tool.uv.workspace]
 members = [
@@ -76,6 +77,7 @@ members = [
     "mcp-run-python",
     "clai",
     "examples",
+    "fasta2a",
 ]
 
 [dependency-groups]
@@ -105,6 +107,7 @@ include = [
     "pydantic_evals/**/*.py",
     "pydantic_graph/**/*.py",
     "mcp-run-python/**/*.py",
+    "fasta2a/**/*.py",
     "examples/**/*.py",
     "clai/**/*.py",
     "tests/**/*.py",
@@ -162,6 +165,7 @@ include = [
     "pydantic_evals",
     "pydantic_graph",
     "mcp-run-python",
+    "fasta2a",
     "tests",
     "examples",
     "clai",
@@ -187,6 +191,8 @@ testpaths = "tests"
 xfail_strict = true
 filterwarnings = [
     "error",
+    # Issue with python-multipart - we don't want to bump the minimum version of starlette.
+    "ignore::PendingDeprecationWarning:starlette",
     # boto3
     "ignore::DeprecationWarning:botocore.*",
     "ignore::RuntimeWarning:pydantic_ai.mcp",
@@ -205,6 +211,8 @@ include = [
    "pydantic_ai_slim/**/*.py",
    "pydantic_evals/**/*.py",
    "pydantic_graph/**/*.py",
+    # TODO(Marcelo): Add 100% coverage for A2A.
+    # "fasta2a/**/*.py",
    "tests/**/*.py",
 ]
 omit = ["tests/test_live.py", "tests/example_modules/*.py"]
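The packaging changes above all serve one feature: this release adds the new `fasta2a` workspace package and an `a2a` extra on `pydantic-ai-slim`, wiring Agent2Agent (A2A) protocol support into the main distribution. A minimal sketch of what the extra enables, based only on the APIs exercised in the new `tests/test_a2a.py` further down (the model string is illustrative):

```python
# Sketch only: assumes the new extra is installed, e.g.
#   pip install 'pydantic-ai-slim[a2a]'
from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')  # any model; this choice is hypothetical

# to_a2a() returns an ASGI app exposing the agent over the A2A protocol
# (the tests drive it via httpx.ASGITransport); serve it with any ASGI
# server, e.g. `uvicorn my_module:app`.
app = agent.to_a2a()
```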
{pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_evaluator_common.py

@@ -7,6 +7,8 @@ import pytest
 from inline_snapshot import snapshot
 from pytest_mock import MockerFixture
 
+from pydantic_ai.settings import ModelSettings
+
 from ..conftest import try_import
 
 with try_import() as imports_successful:
@@ -222,7 +224,7 @@ async def test_llm_judge_evaluator(mocker: MockerFixture):
     assert result.value is True
     assert result.reason == 'Test passed'
 
-    mock_judge_output.assert_called_once_with('Hello world', 'Content contains a greeting', None)
+    mock_judge_output.assert_called_once_with('Hello world', 'Content contains a greeting', None, None)
 
     # Test with input
     evaluator = LLMJudge(rubric='Output contains input', include_input=True, model='openai:gpt-4o')
@@ -232,7 +234,7 @@ async def test_llm_judge_evaluator(mocker: MockerFixture):
     assert result.reason == 'Test passed'
 
     mock_judge_input_output.assert_called_once_with(
-        {'prompt': 'Hello'}, 'Hello world', 'Output contains input', 'openai:gpt-4o'
+        {'prompt': 'Hello'}, 'Hello world', 'Output contains input', 'openai:gpt-4o', None
     )
 
     # Test with failing result
@@ -244,6 +246,61 @@ async def test_llm_judge_evaluator(mocker: MockerFixture):
     assert result.reason == 'Test failed'
 
 
+@pytest.mark.anyio
+async def test_llm_judge_evaluator_with_model_settings(mocker: MockerFixture):
+    """Test LLMJudge evaluator with specific model_settings."""
+    mock_grading_output = mocker.MagicMock()
+    mock_grading_output.pass_ = True
+    mock_grading_output.reason = 'Test passed with settings'
+
+    mock_judge_output = mocker.patch('pydantic_evals.evaluators.llm_as_a_judge.judge_output')
+    mock_judge_output.return_value = mock_grading_output
+
+    mock_judge_input_output = mocker.patch('pydantic_evals.evaluators.llm_as_a_judge.judge_input_output')
+    mock_judge_input_output.return_value = mock_grading_output
+
+    custom_model_settings = ModelSettings(temperature=0.77)
+
+    ctx = EvaluatorContext(
+        name='test_custom_settings',
+        inputs={'prompt': 'Hello Custom'},
+        metadata=None,
+        expected_output=None,
+        output='Hello world custom settings',
+        duration=0.0,
+        _span_tree=SpanTreeRecordingError('spans were not recorded'),
+        attributes={},
+        metrics={},
+    )
+
+    # Test without input, with custom model_settings
+    evaluator_no_input = LLMJudge(rubric='Greeting with custom settings', model_settings=custom_model_settings)
+    result_no_input = await evaluator_no_input.evaluate(ctx)
+    assert result_no_input.value is True
+    assert result_no_input.reason == 'Test passed with settings'
+    mock_judge_output.assert_called_once_with(
+        'Hello world custom settings', 'Greeting with custom settings', None, custom_model_settings
+    )
+
+    # Test with input, with custom model_settings
+    evaluator_with_input = LLMJudge(
+        rubric='Output contains input with custom settings',
+        include_input=True,
+        model='openai:gpt-3.5-turbo',
+        model_settings=custom_model_settings,
+    )
+    result_with_input = await evaluator_with_input.evaluate(ctx)
+    assert result_with_input.value is True
+    assert result_with_input.reason == 'Test passed with settings'
+    mock_judge_input_output.assert_called_once_with(
+        {'prompt': 'Hello Custom'},
+        'Hello world custom settings',
+        'Output contains input with custom settings',
+        'openai:gpt-3.5-turbo',
+        custom_model_settings,
+    )
+
+
 async def test_python():
     """Test Python evaluator."""
     evaluator = Python(expression='ctx.output > 0')
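The thread running through this file is a new `model_settings` argument: `LLMJudge` now accepts it and forwards it to `judge_output` / `judge_input_output`, which is why the pre-existing call assertions gain a trailing `None`. A hedged usage sketch, assuming `LLMJudge` is importable from `pydantic_evals.evaluators` as elsewhere in this test suite:

```python
from pydantic_ai.settings import ModelSettings
from pydantic_evals.evaluators import LLMJudge

# Pin the judge model's sampling behaviour, e.g. for more repeatable grading.
judge = LLMJudge(
    rubric='Output contains a greeting',
    model='openai:gpt-4o',
    model_settings=ModelSettings(temperature=0.0),
)
```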
{pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/evals/test_llm_as_a_judge.py

@@ -6,6 +6,7 @@ from pytest_mock import MockerFixture
 from ..conftest import try_import
 
 with try_import() as imports_successful:
+    from pydantic_ai.settings import ModelSettings
     from pydantic_evals.evaluators.llm_as_a_judge import (
         GradingOutput,
         _stringify,  # pyright: ignore[reportPrivateUsage]
@@ -87,6 +88,34 @@ async def test_judge_output_mock(mocker: MockerFixture):
     assert '<Rubric>\nContent contains a greeting\n</Rubric>' in call_args[0]
 
 
+@pytest.mark.anyio
+async def test_judge_output_with_model_settings_mock(mocker: MockerFixture):
+    """Test judge_output function with model_settings and mocked agent."""
+    mock_result = mocker.MagicMock()
+    mock_result.output = GradingOutput(reason='Test passed with settings', pass_=True, score=1.0)
+    mock_run = mocker.patch('pydantic_ai.Agent.run', return_value=mock_result)
+
+    test_model_settings = ModelSettings(temperature=1)
+
+    grading_output = await judge_output(
+        'Hello world settings',
+        'Content contains a greeting with settings',
+        model_settings=test_model_settings,
+    )
+    assert isinstance(grading_output, GradingOutput)
+    assert grading_output.reason == 'Test passed with settings'
+    assert grading_output.pass_ is True
+    assert grading_output.score == 1.0
+
+    mock_run.assert_called_once()
+    call_args, call_kwargs = mock_run.call_args
+    assert '<Output>\nHello world settings\n</Output>' in call_args[0]
+    assert '<Rubric>\nContent contains a greeting with settings\n</Rubric>' in call_args[0]
+    assert call_kwargs['model_settings'] == test_model_settings
+    # Check if 'model' kwarg is passed, its value will be the default model or None
+    assert 'model' in call_kwargs
+
+
 @pytest.mark.anyio
 async def test_judge_input_output_mock(mocker: MockerFixture):
     """Test judge_input_output function with mocked agent."""
@@ -108,3 +137,33 @@ async def test_judge_input_output_mock(mocker: MockerFixture):
     assert '<Input>\nHello\n</Input>' in call_args[0]
     assert '<Output>\nHello world\n</Output>' in call_args[0]
     assert '<Rubric>\nOutput contains input\n</Rubric>' in call_args[0]
+
+
+@pytest.mark.anyio
+async def test_judge_input_output_with_model_settings_mock(mocker: MockerFixture):
+    """Test judge_input_output function with model_settings and mocked agent."""
+    mock_result = mocker.MagicMock()
+    mock_result.output = GradingOutput(reason='Test passed with settings', pass_=True, score=1.0)
+    mock_run = mocker.patch('pydantic_ai.Agent.run', return_value=mock_result)
+
+    test_model_settings = ModelSettings(temperature=1)
+
+    result = await judge_input_output(
+        'Hello settings',
+        'Hello world with settings',
+        'Output contains input with settings',
+        model_settings=test_model_settings,
+    )
+    assert isinstance(result, GradingOutput)
+    assert result.reason == 'Test passed with settings'
+    assert result.pass_ is True
+    assert result.score == 1.0
+
+    mock_run.assert_called_once()
+    call_args, call_kwargs = mock_run.call_args
+    assert '<Input>\nHello settings\n</Input>' in call_args[0]
+    assert '<Output>\nHello world with settings\n</Output>' in call_args[0]
+    assert '<Rubric>\nOutput contains input with settings\n</Rubric>' in call_args[0]
+    assert call_kwargs['model_settings'] == test_model_settings
+    # Check if 'model' kwarg is passed, its value will be the default model or None
+    assert 'model' in call_kwargs
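The standalone judge functions accept the same keyword and pass it straight through to `Agent.run` as `model_settings`, which is exactly what the mocked assertions above verify. A short sketch under those assumptions:

```python
from pydantic_ai.settings import ModelSettings
from pydantic_evals.evaluators.llm_as_a_judge import judge_output


async def grade(output: str) -> bool:
    # model_settings rides along to the underlying Agent.run call.
    grading = await judge_output(
        output,
        'Content contains a greeting',
        model_settings=ModelSettings(temperature=1),
    )
    return grading.pass_
```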
{pydantic_ai-0.2.1 → pydantic_ai-0.2.3}/tests/json_body_serializer.py

@@ -6,12 +6,12 @@ from typing import TYPE_CHECKING, Any
 import yaml
 
 if TYPE_CHECKING:
-    from yaml import Dumper
+    from yaml import Dumper
 else:
     try:
-        from yaml import CDumper as Dumper
-    except ImportError:
-        from yaml import Dumper
+        from yaml import CDumper as Dumper
+    except ImportError:  # pragma: no cover
+        from yaml import Dumper
 
 FILTERED_HEADER_PREFIXES = ['anthropic-', 'cf-', 'x-']
 FILTERED_HEADERS = {'authorization', 'date', 'request-id', 'server', 'user-agent', 'via', 'set-cookie', 'api-key'}
@@ -35,7 +35,7 @@ LiteralDumper.add_representer(str, str_presenter)
 
 
 def deserialize(cassette_string: str):
-    cassette_dict = yaml.
+    cassette_dict = yaml.safe_load(cassette_string)
     for interaction in cassette_dict['interactions']:
         for kind, data in interaction.items():
             parsed_body = data.pop('parsed_body', None)
@@ -45,7 +45,7 @@ def deserialize(cassette_string: str):
     return cassette_dict
 
 
-def serialize(cassette_dict: Any):
+def serialize(cassette_dict: Any):  # pragma: lax no cover
     for interaction in cassette_dict['interactions']:
         for _kind, data in interaction.items():
             headers: dict[str, list[str]] = data.get('headers', {})

pydantic_ai-0.2.3/tests/providers/__init__.py

File without changes
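Switching the cassette parser to `yaml.safe_load` means only plain YAML tags (mappings, sequences, scalars) get constructed, so a tampered cassette can no longer instantiate arbitrary Python objects the way `yaml.load` with an unsafe loader can:

```python
import yaml

# safe_load yields plain Python containers and scalars only.
data = yaml.safe_load('interactions: []')
assert data == {'interactions': []}
```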
pydantic_ai-0.2.3/tests/test_a2a.py

@@ -0,0 +1,352 @@
+import anyio
+import httpx
+import pytest
+from asgi_lifespan import LifespanManager
+from inline_snapshot import snapshot
+
+from pydantic_ai import Agent
+from pydantic_ai.messages import ModelMessage, ModelResponse, ToolCallPart
+from pydantic_ai.models.function import AgentInfo, FunctionModel
+
+from .conftest import IsDatetime, IsStr, try_import
+
+with try_import() as imports_successful:
+    from fasta2a.client import A2AClient
+    from fasta2a.schema import DataPart, FilePart, Message, TextPart
+    from fasta2a.storage import InMemoryStorage
+
+
+pytestmark = [
+    pytest.mark.skipif(not imports_successful(), reason='fasta2a not installed'),
+    pytest.mark.anyio,
+    pytest.mark.vcr,
+]
+
+
+def return_string(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
+    assert info.output_tools is not None
+    args_json = '{"response": ["foo", "bar"]}'
+    return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, args_json)])
+
+
+model = FunctionModel(return_string)
+
+
+async def test_a2a_runtime_error_without_lifespan():
+    agent = Agent(model=model, output_type=tuple[str, str])
+    app = agent.to_a2a()
+
+    transport = httpx.ASGITransport(app)
+    async with httpx.AsyncClient(transport=transport) as http_client:
+        a2a_client = A2AClient(http_client=http_client)
+
+        message = Message(role='user', parts=[TextPart(text='Hello, world!', type='text')])
+
+        with pytest.raises(RuntimeError, match='TaskManager was not properly initialized.'):
+            await a2a_client.send_task(message=message)
+
+
+async def test_a2a_simple():
+    agent = Agent(model=model, output_type=tuple[str, str])
+    app = agent.to_a2a()
+
+    async with LifespanManager(app):
+        transport = httpx.ASGITransport(app)
+        async with httpx.AsyncClient(transport=transport) as http_client:
+            a2a_client = A2AClient(http_client=http_client)
+
+            message = Message(role='user', parts=[TextPart(text='Hello, world!', type='text')])
+            response = await a2a_client.send_task(message=message)
+            assert response == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': IsStr(),
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'submitted', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [{'role': 'user', 'parts': [{'type': 'text', 'text': 'Hello, world!'}]}],
+                    },
+                }
+            )
+
+            assert 'result' in response
+            task_id = response['result']['id']
+
+            while task := await a2a_client.get_task(task_id):
+                if 'result' in task and task['result']['status']['state'] == 'completed':
+                    break
+                await anyio.sleep(0.1)
+            assert task == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': None,
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'completed', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [{'role': 'user', 'parts': [{'type': 'text', 'text': 'Hello, world!'}]}],
+                        'artifacts': [
+                            {'name': 'result', 'parts': [{'type': 'text', 'text': "('foo', 'bar')"}], 'index': 0}
+                        ],
+                    },
+                }
+            )
+
+
+async def test_a2a_file_message_with_file():
+    agent = Agent(model=model, output_type=tuple[str, str])
+    app = agent.to_a2a()
+
+    async with LifespanManager(app):
+        transport = httpx.ASGITransport(app)
+        async with httpx.AsyncClient(transport=transport) as http_client:
+            a2a_client = A2AClient(http_client=http_client)
+
+            message = Message(
+                role='user',
+                parts=[
+                    FilePart(
+                        type='file',
+                        file={'url': 'https://example.com/file.txt', 'mime_type': 'text/plain'},
+                    )
+                ],
+            )
+            response = await a2a_client.send_task(message=message)
+            assert response == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': IsStr(),
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'submitted', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [
+                            {
+                                'role': 'user',
+                                'parts': [
+                                    {
+                                        'type': 'file',
+                                        'file': {'mime_type': 'text/plain', 'url': 'https://example.com/file.txt'},
+                                    }
+                                ],
+                            }
+                        ],
+                    },
+                }
+            )
+
+            assert 'result' in response
+            task_id = response['result']['id']
+
+            while task := await a2a_client.get_task(task_id):
+                if 'result' in task and task['result']['status']['state'] == 'completed':
+                    break
+                await anyio.sleep(0.1)
+            assert task == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': None,
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'completed', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [
+                            {
+                                'role': 'user',
+                                'parts': [
+                                    {
+                                        'type': 'file',
+                                        'file': {'mime_type': 'text/plain', 'url': 'https://example.com/file.txt'},
+                                    }
+                                ],
+                            }
+                        ],
+                        'artifacts': [
+                            {'name': 'result', 'parts': [{'type': 'text', 'text': "('foo', 'bar')"}], 'index': 0}
+                        ],
+                    },
+                }
+            )
+
+
+async def test_a2a_file_message_with_file_content():
+    agent = Agent(model=model, output_type=tuple[str, str])
+    app = agent.to_a2a()
+
+    async with LifespanManager(app):
+        transport = httpx.ASGITransport(app)
+        async with httpx.AsyncClient(transport=transport) as http_client:
+            a2a_client = A2AClient(http_client=http_client)
+
+            message = Message(
+                role='user',
+                parts=[
+                    FilePart(type='file', file={'data': 'foo', 'mime_type': 'text/plain'}),
+                ],
+            )
+            response = await a2a_client.send_task(message=message)
+            assert response == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': IsStr(),
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'submitted', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [
+                            {
+                                'role': 'user',
+                                'parts': [{'type': 'file', 'file': {'mime_type': 'text/plain', 'data': 'foo'}}],
+                            }
+                        ],
+                    },
+                }
+            )
+
+            assert 'result' in response
+            task_id = response['result']['id']
+
+            while task := await a2a_client.get_task(task_id):
+                if 'result' in task and task['result']['status']['state'] == 'completed':
+                    break
+                await anyio.sleep(0.1)
+            assert task == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': None,
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'completed', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [
+                            {
+                                'role': 'user',
+                                'parts': [{'type': 'file', 'file': {'mime_type': 'text/plain', 'data': 'foo'}}],
+                            }
+                        ],
+                        'artifacts': [
+                            {'name': 'result', 'parts': [{'type': 'text', 'text': "('foo', 'bar')"}], 'index': 0}
+                        ],
+                    },
+                }
+            )
+
+
+async def test_a2a_file_message_with_data():
+    agent = Agent(model=model, output_type=tuple[str, str])
+    app = agent.to_a2a()
+
+    async with LifespanManager(app):
+        transport = httpx.ASGITransport(app)
+        async with httpx.AsyncClient(transport=transport) as http_client:
+            a2a_client = A2AClient(http_client=http_client)
+
+            message = Message(
+                role='user',
+                parts=[DataPart(type='data', data={'foo': 'bar'})],
+            )
+            response = await a2a_client.send_task(message=message)
+            assert response == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': IsStr(),
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'submitted', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [{'role': 'user', 'parts': [{'type': 'data', 'data': {'foo': 'bar'}}]}],
+                    },
+                }
+            )
+
+            assert 'result' in response
+            task_id = response['result']['id']
+
+            while task := await a2a_client.get_task(task_id):
+                if 'result' in task and task['result']['status']['state'] == 'failed':
+                    break
+                await anyio.sleep(0.1)
+            assert task == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': None,
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'failed', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [{'role': 'user', 'parts': [{'type': 'data', 'data': {'foo': 'bar'}}]}],
+                    },
+                }
+            )
+
+
+async def test_a2a_multiple_messages():
+    agent = Agent(model=model, output_type=tuple[str, str])
+    storage = InMemoryStorage()
+    app = agent.to_a2a(storage=storage)
+
+    async with LifespanManager(app):
+        transport = httpx.ASGITransport(app)
+        async with httpx.AsyncClient(transport=transport) as http_client:
+            a2a_client = A2AClient(http_client=http_client)
+
+            message = Message(role='user', parts=[TextPart(text='Hello, world!', type='text')])
+            response = await a2a_client.send_task(message=message)
+            assert response == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': IsStr(),
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'submitted', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [{'role': 'user', 'parts': [{'type': 'text', 'text': 'Hello, world!'}]}],
+                    },
+                }
+            )
+
+            # NOTE: We include the agent history before we start working on the task.
+            assert 'result' in response
+            task_id = response['result']['id']
+            task = storage.tasks[task_id]
+            assert 'history' in task
+            task['history'].append(Message(role='agent', parts=[TextPart(text='Whats up?', type='text')]))
+
+            response = await a2a_client.get_task(task_id)
+            assert response == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': None,
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'submitted', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [
+                            {'role': 'user', 'parts': [{'type': 'text', 'text': 'Hello, world!'}]},
+                            {'role': 'agent', 'parts': [{'type': 'text', 'text': 'Whats up?'}]},
+                        ],
+                    },
+                }
+            )
+
+            await anyio.sleep(0.1)
+            task = await a2a_client.get_task(task_id)
+            assert task == snapshot(
+                {
+                    'jsonrpc': '2.0',
+                    'id': None,
+                    'result': {
+                        'id': IsStr(),
+                        'session_id': IsStr(),
+                        'status': {'state': 'completed', 'timestamp': IsDatetime(iso_string=True)},
+                        'history': [
+                            {'role': 'user', 'parts': [{'type': 'text', 'text': 'Hello, world!'}]},
+                            {'role': 'agent', 'parts': [{'type': 'text', 'text': 'Whats up?'}]},
+                        ],
+                        'artifacts': [
+                            {'name': 'result', 'parts': [{'type': 'text', 'text': "('foo', 'bar')"}], 'index': 0}
+                        ],
+                    },
+                }
+            )
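Every test above repeats the same client pattern: send a task, then poll `get_task` until it reaches a terminal state. Distilled into one helper, as a sketch reusing only the fasta2a calls the tests themselves make:

```python
import anyio
from fasta2a.client import A2AClient
from fasta2a.schema import Message, TextPart


async def run_task(client: A2AClient, text: str):
    """Send a task and poll until it completes or fails."""
    message = Message(role='user', parts=[TextPart(text=text, type='text')])
    response = await client.send_task(message=message)
    task_id = response['result']['id']
    while True:
        task = await client.get_task(task_id)
        if task['result']['status']['state'] in ('completed', 'failed'):
            return task
        await anyio.sleep(0.1)
```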