pydantic-ai 1.41.0__tar.gz → 1.44.0__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- {pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/PKG-INFO +12 -12
- {pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/README.md +1 -1
- {pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/pyproject.toml +3 -1
- {pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/.gitignore +0 -0
- {pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/LICENSE +0 -0
- {pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/Makefile +0 -0
{pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai
- Version: 1.41.0
+ Version: 1.44.0
  Summary: Agent Framework / shim to use Pydantic with LLMs
  Project-URL: Homepage, https://ai.pydantic.dev
  Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -26,27 +26,27 @@ Classifier: Topic :: Internet
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.10
- Requires-Dist: pydantic-ai-slim[ag-ui,anthropic,bedrock,cli,cohere,evals,fastmcp,google,groq,huggingface,logfire,mcp,mistral,openai,retries,temporal,ui,vertexai]==1.41.0
+ Requires-Dist: pydantic-ai-slim[ag-ui,anthropic,bedrock,cli,cohere,evals,fastmcp,google,groq,huggingface,logfire,mcp,mistral,openai,retries,temporal,ui,vertexai]==1.44.0
  Provides-Extra: a2a
  Requires-Dist: fasta2a>=0.4.1; extra == 'a2a'
  Provides-Extra: dbos
- Requires-Dist: pydantic-ai-slim[dbos]==1.41.0; extra == 'dbos'
+ Requires-Dist: pydantic-ai-slim[dbos]==1.44.0; extra == 'dbos'
  Provides-Extra: examples
- Requires-Dist: pydantic-ai-examples==1.41.0; extra == 'examples'
+ Requires-Dist: pydantic-ai-examples==1.44.0; extra == 'examples'
  Provides-Extra: outlines-llamacpp
- Requires-Dist: pydantic-ai-slim[outlines-llamacpp]==1.41.0; extra == 'outlines-llamacpp'
+ Requires-Dist: pydantic-ai-slim[outlines-llamacpp]==1.44.0; extra == 'outlines-llamacpp'
  Provides-Extra: outlines-mlxlm
- Requires-Dist: pydantic-ai-slim[outlines-mlxlm]==1.41.0; (platform_system == 'Darwin' and platform_machine == 'arm64') and extra == 'outlines-mlxlm'
+ Requires-Dist: pydantic-ai-slim[outlines-mlxlm]==1.44.0; (platform_system == 'Darwin' and platform_machine == 'arm64') and extra == 'outlines-mlxlm'
  Provides-Extra: outlines-sglang
- Requires-Dist: pydantic-ai-slim[outlines-sglang]==1.41.0; extra == 'outlines-sglang'
+ Requires-Dist: pydantic-ai-slim[outlines-sglang]==1.44.0; extra == 'outlines-sglang'
  Provides-Extra: outlines-transformers
- Requires-Dist: pydantic-ai-slim[outlines-transformers]==1.41.0; extra == 'outlines-transformers'
+ Requires-Dist: pydantic-ai-slim[outlines-transformers]==1.44.0; extra == 'outlines-transformers'
  Provides-Extra: outlines-vllm-offline
- Requires-Dist: pydantic-ai-slim[outlines-vllm-offline]==1.41.0; extra == 'outlines-vllm-offline'
+ Requires-Dist: pydantic-ai-slim[outlines-vllm-offline]==1.44.0; extra == 'outlines-vllm-offline'
  Provides-Extra: prefect
- Requires-Dist: pydantic-ai-slim[prefect]==1.41.0; extra == 'prefect'
+ Requires-Dist: pydantic-ai-slim[prefect]==1.44.0; extra == 'prefect'
  Provides-Extra: sentence-transformers
- Requires-Dist: pydantic-ai-slim[sentence-transformers]==1.41.0; extra == 'sentence-transformers'
+ Requires-Dist: pydantic-ai-slim[sentence-transformers]==1.44.0; extra == 'sentence-transformers'
  Description-Content-Type: text/markdown

  <div align="center">
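All of the version pins above move in lockstep from 1.41.0 to 1.44.0, and the `outlines-mlxlm` extra stays gated behind an environment marker so it only resolves on Apple Silicon macOS. As a sketch of how such a pin is interpreted (using the third-party `packaging` library, not anything shipped by this package), parsing the new requirement string separates the name, extra, version pin, and marker:

```python
from packaging.requirements import Requirement

# The mlxlm pin exactly as it appears in the new PKG-INFO, marker included.
req = Requirement(
    "pydantic-ai-slim[outlines-mlxlm]==1.44.0; "
    "(platform_system == 'Darwin' and platform_machine == 'arm64') "
    "and extra == 'outlines-mlxlm'"
)

print(req.name)       # pydantic-ai-slim
print(req.extras)     # {'outlines-mlxlm'}
print(req.specifier)  # ==1.44.0

# evaluate() uses the current interpreter's environment by default; passing a
# dict overrides those values, which lets the marker logic be checked from any
# machine. The 'extra' variable must always be supplied by hand.
env = {"platform_system": "Darwin", "platform_machine": "arm64", "extra": "outlines-mlxlm"}
print(req.marker.evaluate(env))  # True
```

On a real install, pip fills `platform_system` and `platform_machine` from the target machine, so the marker simply prevents the MLX-backed extra from being attempted anywhere other than arm64 macOS.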
{pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/PKG-INFO (continued)

@@ -90,7 +90,7 @@ We built Pydantic AI with one simple aim: to bring that FastAPI feeling to GenAI
  [Pydantic Validation](https://docs.pydantic.dev/latest/) is the validation layer of the OpenAI SDK, the Google ADK, the Anthropic SDK, LangChain, LlamaIndex, AutoGPT, Transformers, CrewAI, Instructor and many more. _Why use the derivative when you can go straight to the source?_ :smiley:

  2. **Model-agnostic**:
- Supports virtually every [model](https://ai.pydantic.dev/models/overview) and provider: OpenAI, Anthropic, Gemini, DeepSeek, Grok, Cohere, Mistral, and Perplexity; Azure AI Foundry, Amazon Bedrock, Google Vertex AI, Ollama, LiteLLM, Groq, OpenRouter, Together AI, Fireworks AI, Cerebras, Hugging Face, GitHub, Heroku, Vercel, Nebius, OVHcloud, Alibaba Cloud, and Outlines. If your favorite model or provider is not listed, you can easily implement a [custom model](https://ai.pydantic.dev/models/overview#custom-models).
+ Supports virtually every [model](https://ai.pydantic.dev/models/overview) and provider: OpenAI, Anthropic, Gemini, DeepSeek, Grok, Cohere, Mistral, and Perplexity; Azure AI Foundry, Amazon Bedrock, Google Vertex AI, Ollama, LiteLLM, Groq, OpenRouter, Together AI, Fireworks AI, Cerebras, Hugging Face, GitHub, Heroku, Vercel, Nebius, OVHcloud, Alibaba Cloud, SambaNova, and Outlines. If your favorite model or provider is not listed, you can easily implement a [custom model](https://ai.pydantic.dev/models/overview#custom-models).

  3. **Seamless Observability**:
  Tightly [integrates](https://ai.pydantic.dev/logfire) with [Pydantic Logfire](https://pydantic.dev/logfire), our general-purpose OpenTelemetry observability platform, for real-time debugging, evals-based performance monitoring, and behavior, tracing, and cost tracking. If you already have an observability platform that supports OTel, you can [use that too](https://ai.pydantic.dev/logfire#alternative-observability-backends).
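The only change to the embedded description is the addition of SambaNova to the provider list. For context on what "model-agnostic" means in practice, switching providers comes down to changing the model string passed to `Agent`; the sketch below follows the library's documented hello-world pattern, and the model name is an illustrative choice rather than anything mandated by this release:

```python
from pydantic_ai import Agent

# The provider-prefixed model string is the only thing that changes when
# switching providers; 'openai:gpt-4o' is an illustrative choice.
agent = Agent(
    'openai:gpt-4o',
    instructions='Be concise, reply with one sentence.',
)

result = agent.run_sync('Where does "hello world" come from?')
print(result.output)
```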
{pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/README.md

@@ -39,7 +39,7 @@ We built Pydantic AI with one simple aim: to bring that FastAPI feeling to GenAI
  [Pydantic Validation](https://docs.pydantic.dev/latest/) is the validation layer of the OpenAI SDK, the Google ADK, the Anthropic SDK, LangChain, LlamaIndex, AutoGPT, Transformers, CrewAI, Instructor and many more. _Why use the derivative when you can go straight to the source?_ :smiley:

  2. **Model-agnostic**:
- Supports virtually every [model](https://ai.pydantic.dev/models/overview) and provider: OpenAI, Anthropic, Gemini, DeepSeek, Grok, Cohere, Mistral, and Perplexity; Azure AI Foundry, Amazon Bedrock, Google Vertex AI, Ollama, LiteLLM, Groq, OpenRouter, Together AI, Fireworks AI, Cerebras, Hugging Face, GitHub, Heroku, Vercel, Nebius, OVHcloud, Alibaba Cloud, and Outlines. If your favorite model or provider is not listed, you can easily implement a [custom model](https://ai.pydantic.dev/models/overview#custom-models).
+ Supports virtually every [model](https://ai.pydantic.dev/models/overview) and provider: OpenAI, Anthropic, Gemini, DeepSeek, Grok, Cohere, Mistral, and Perplexity; Azure AI Foundry, Amazon Bedrock, Google Vertex AI, Ollama, LiteLLM, Groq, OpenRouter, Together AI, Fireworks AI, Cerebras, Hugging Face, GitHub, Heroku, Vercel, Nebius, OVHcloud, Alibaba Cloud, SambaNova, and Outlines. If your favorite model or provider is not listed, you can easily implement a [custom model](https://ai.pydantic.dev/models/overview#custom-models).

  3. **Seamless Observability**:
  Tightly [integrates](https://ai.pydantic.dev/logfire) with [Pydantic Logfire](https://pydantic.dev/logfire), our general-purpose OpenTelemetry observability platform, for real-time debugging, evals-based performance monitoring, and behavior, tracing, and cost tracking. If you already have an observability platform that supports OTel, you can [use that too](https://ai.pydantic.dev/logfire#alternative-observability-backends).
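README.md carries the same one-line provider-list change as the embedded description above. Since the surrounding text describes the Logfire integration, here is a minimal sketch of how that instrumentation is typically wired up; it assumes a recent `logfire` SDK that provides `instrument_pydantic_ai()` and a Logfire project already configured in the environment:

```python
import logfire

from pydantic_ai import Agent

# Assumes a Logfire token/project is configured in the environment and that
# the installed logfire SDK exposes instrument_pydantic_ai().
logfire.configure()
logfire.instrument_pydantic_ai()

agent = Agent('openai:gpt-4o', instructions='Be concise.')
result = agent.run_sync('What is an environment marker in Python packaging?')
print(result.output)  # the agent run and model request show up as spans in Logfire
```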
{pydantic_ai-1.41.0 → pydantic_ai-1.44.0}/pyproject.toml

@@ -98,6 +98,7 @@ dev = [
      "coverage[toml]>=7.10.7",
      "dirty-equals>=0.9.0",
      "duckduckgo-search>=7.0.0",
+     "exa-py>=2.0.0",
      "inline-snapshot>=0.19.3",
      "pytest>=9.0.0",
      "pytest-examples>=0.0.18",
@@ -263,7 +264,8 @@ include = [
  ]
  omit = [
      "tests/example_modules/*.py",
-     "pydantic_ai_slim/pydantic_ai/ext/aci.py",
+     "pydantic_ai_slim/pydantic_ai/ext/aci.py",  # aci-sdk is too niche to be added as an (optional) dependency
+     "pydantic_ai_slim/pydantic_ai/common_tools/exa.py",  # exa-py integration with external API calls
      # TODO(Marcelo): Enable prefect coverage again.
      "pydantic_ai_slim/pydantic_ai/durable_exec/prefect/*.py",
      "tests/test_prefect.py",
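The new `exa-py>=2.0.0` dev dependency and the coverage exclusion for `pydantic_ai_slim/pydantic_ai/common_tools/exa.py` go together: the module integrates with Exa's hosted search API, so exercising it means making external calls. The diff doesn't show the wrapper's own API, so the sketch below just uses the `exa_py` client directly to illustrate what that integration talks to; the environment variable name is an assumption:

```python
import os

from exa_py import Exa  # the client that the excluded common_tools/exa.py integrates with

# Illustrative direct use of exa-py, not pydantic-ai's wrapper; every call
# hits Exa's hosted API, which is why the module is omitted from coverage.
exa = Exa(api_key=os.environ["EXA_API_KEY"])  # hypothetical env var name

response = exa.search("Pydantic AI model-agnostic agents", num_results=3)
for item in response.results:
    print(item.title, item.url)
```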