pydantic-ai-slim 1.0.3__tar.gz → 1.0.4__tar.gz
This diff shows the content of publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/PKG-INFO +3 -3
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_parts_manager.py +3 -3
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/__init__.py +5 -1
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/anthropic.py +0 -1
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/bedrock.py +0 -1
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/google.py +0 -1
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/mistral.py +1 -1
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/openai.py +48 -19
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/openai.py +4 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/__init__.py +3 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/anthropic.py +8 -4
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/cohere.py +2 -2
- pydantic_ai_slim-1.0.4/pydantic_ai/providers/gateway.py +187 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/google.py +2 -2
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/google_gla.py +1 -1
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/groq.py +12 -5
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/heroku.py +2 -2
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/huggingface.py +1 -1
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/mistral.py +1 -1
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/openai.py +13 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/.gitignore +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/LICENSE +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/README.md +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_otel_messages.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_tool_manager.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/ag_ui.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/agent/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/agent/abstract.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/agent/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/builtin_tools.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/dbos/_agent.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/ext/aci.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/gemini.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/huggingface.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/mcp_sampling.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/output.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/groq.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/harmony.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/cerebras.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/github.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/litellm.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/ollama.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/providers/vercel.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/retries.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/run.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/__init__.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/_dynamic.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/abstract.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/approval_required.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/combined.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/external.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/filtered.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/function.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/prefixed.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/prepared.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/renamed.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/toolsets/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.4}/pyproject.toml +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.3
+Version: 1.0.4
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.3
+Requires-Dist: pydantic-graph==1.0.4
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.13.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.3; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.4; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
pydantic_ai/_parts_manager.py
@@ -198,16 +198,16 @@ class ModelResponsePartsManager:
             existing_thinking_part_and_index = existing_part, part_index

         if existing_thinking_part_and_index is None:
-            if content is not None:
+            if content is not None or signature is not None:
                 # There is no existing thinking part that should be updated, so create a new one
                 new_part_index = len(self._parts)
-                part = ThinkingPart(content=content, id=id, signature=signature, provider_name=provider_name)
+                part = ThinkingPart(content=content or '', id=id, signature=signature, provider_name=provider_name)
                 if vendor_part_id is not None:  # pragma: no branch
                     self._vendor_id_to_part_index[vendor_part_id] = new_part_index
                 self._parts.append(part)
                 return PartStartEvent(index=new_part_index, part=part)
             else:
-                raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content')
+                raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content or signature')
         else:
             if content is not None or signature is not None:
                 # Update the existing ThinkingPart with the new content and/or signature delta
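Note: this change lets a ThinkingPart be created from a signature-only delta, which underpins the removal of the empty-content placeholders in anthropic.py, bedrock.py, and google.py below. A minimal sketch of the now-valid call (values are placeholders):

    from pydantic_ai._parts_manager import ModelResponsePartsManager

    manager = ModelResponsePartsManager()
    # A delta carrying only a signature now creates a ThinkingPart with empty
    # content instead of raising UnexpectedModelBehavior.
    event = manager.handle_thinking_delta(
        vendor_part_id='thinking',
        signature='<opaque-provider-signature>',
        provider_name='anthropic',
    )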
pydantic_ai/models/__init__.py
@@ -718,7 +718,11 @@ def infer_model(model: Model | KnownModelName | str) -> Model:  # noqa: C901
         )
         provider = 'google-vertex'

-    if provider == 'cohere':
+    if provider == 'gateway':
+        from ..providers.gateway import infer_model as infer_model_from_gateway
+
+        return infer_model_from_gateway(model_name)
+    elif provider == 'cohere':
         from .cohere import CohereModel

         return CohereModel(model_name, provider=provider)
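With this dispatch, model strings whose provider prefix is gateway route through the new pydantic_ai/providers/gateway.py module (added below). A usage sketch, assuming the 'gateway:<upstream_provider>/<model_name>' string format implied by gateway.infer_model, and that PYDANTIC_AI_GATEWAY_API_KEY is set:

    from pydantic_ai.models import infer_model

    # 'gateway' selects the branch above; the remaining 'openai/gpt-4o' is then
    # split on '/' by pydantic_ai.providers.gateway.infer_model.
    model = infer_model('gateway:openai/gpt-4o')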
pydantic_ai/models/anthropic.py
@@ -641,7 +641,6 @@ class AnthropicStreamedResponse(StreamedResponse):
                 yield self._parts_manager.handle_thinking_delta(
                     vendor_part_id=event.index,
                     id='redacted_thinking',
-                    content='',
                     signature=current_block.data,
                     provider_name=self.provider_name,
                 )

pydantic_ai/models/bedrock.py
@@ -681,7 +681,6 @@ class BedrockStreamedResponse(StreamedResponse):
                 yield self._parts_manager.handle_thinking_delta(
                     vendor_part_id=index,
                     id='redacted_content',
-                    content='',
                     signature=redacted_content.decode('utf-8'),
                     provider_name=self.provider_name,
                 )

pydantic_ai/models/google.py
@@ -596,7 +596,6 @@ class GeminiStreamedResponse(StreamedResponse):
             signature = base64.b64encode(part.thought_signature).decode('utf-8')
             yield self._parts_manager.handle_thinking_delta(
                 vendor_part_id='thinking',
-                content='',  # A thought signature may occur without a preceding thinking part, so we add an empty delta so that a new part can be created
                 signature=signature,
                 provider_name=self.provider_name,
             )
pydantic_ai/models/mistral.py
@@ -82,7 +82,7 @@ try:
     from mistralai.models.usermessage import UserMessage as MistralUserMessage
     from mistralai.types.basemodel import Unset as MistralUnset
     from mistralai.utils.eventstreaming import EventStreamAsync as MistralEventStreamAsync
-except ImportError as e:
+except ImportError as e:  # pragma: lax no cover
     raise ImportError(
         'Please install `mistral` to use the Mistral model, '
         'you can use the `mistral` optional group — `pip install "pydantic-ai-slim[mistral]"`'
pydantic_ai/models/openai.py
@@ -878,7 +878,9 @@ class OpenAIResponsesModel(Model):
                     if isinstance(content, responses.ResponseOutputText):  # pragma: no branch
                         items.append(TextPart(content.text))
             elif isinstance(item, responses.ResponseFunctionToolCall):
-                items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
+                items.append(
+                    ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
+                )

         finish_reason: FinishReason | None = None
         provider_details: dict[str, Any] | None = None
@@ -980,10 +982,15 @@ class OpenAIResponsesModel(Model):
             text = text or {}
             text['verbosity'] = verbosity

-        unsupported_model_settings = OpenAIModelProfile.from_profile(self.profile).openai_unsupported_model_settings
+        profile = OpenAIModelProfile.from_profile(self.profile)
+        unsupported_model_settings = profile.openai_unsupported_model_settings
         for setting in unsupported_model_settings:
             model_settings.pop(setting, None)

+        include: list[responses.ResponseIncludable] | None = None
+        if profile.openai_supports_encrypted_reasoning_content:
+            include = ['reasoning.encrypted_content']
+
         try:
             extra_headers = model_settings.get('extra_headers', {})
             extra_headers.setdefault('User-Agent', get_user_agent())
@@ -1004,7 +1011,7 @@ class OpenAIResponsesModel(Model):
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 text=text or NOT_GIVEN,
-                include=
+                include=include or NOT_GIVEN,
                 extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
@@ -1079,13 +1086,14 @@ class OpenAIResponsesModel(Model):
                 elif isinstance(part, UserPromptPart):
                     openai_messages.append(await self._map_user_prompt(part))
                 elif isinstance(part, ToolReturnPart):
-                    openai_messages.append(
-                        FunctionCallOutput(
-                            type='function_call_output',
-                            call_id=_guard_tool_call_id(t=part),
-                            output=part.model_response_str(),
-                        )
+                    call_id = _guard_tool_call_id(t=part)
+                    call_id, _ = _split_combined_tool_call_id(call_id)
+                    item = FunctionCallOutput(
+                        type='function_call_output',
+                        call_id=call_id,
+                        output=part.model_response_str(),
                     )
+                    openai_messages.append(item)
                 elif isinstance(part, RetryPromptPart):
                     # TODO(Marcelo): How do we test this conditional branch?
                     if part.tool_name is None:  # pragma: no cover
@@ -1093,13 +1101,14 @@ class OpenAIResponsesModel(Model):
                             Message(role='user', content=[{'type': 'input_text', 'text': part.model_response()}])
                         )
                     else:
-                        openai_messages.append(
-                            FunctionCallOutput(
-                                type='function_call_output',
-                                call_id=_guard_tool_call_id(t=part),
-                                output=part.model_response(),
-                            )
+                        call_id = _guard_tool_call_id(t=part)
+                        call_id, _ = _split_combined_tool_call_id(call_id)
+                        item = FunctionCallOutput(
+                            type='function_call_output',
+                            call_id=call_id,
+                            output=part.model_response(),
                         )
+                        openai_messages.append(item)
                 else:
                     assert_never(part)
         elif isinstance(message, ModelResponse):
@@ -1136,12 +1145,18 @@ class OpenAIResponsesModel(Model):

     @staticmethod
     def _map_tool_call(t: ToolCallPart) -> responses.ResponseFunctionToolCallParam:
-        return responses.ResponseFunctionToolCallParam(
-            arguments=t.args_as_json_str(),
-            call_id=_guard_tool_call_id(t=t),
+        call_id = _guard_tool_call_id(t=t)
+        call_id, id = _split_combined_tool_call_id(call_id)
+
+        param = responses.ResponseFunctionToolCallParam(
             name=t.tool_name,
+            arguments=t.args_as_json_str(),
+            call_id=call_id,
             type='function_call',
         )
+        if id:  # pragma: no branch
+            param['id'] = id
+        return param

     def _map_json_schema(self, o: OutputObjectDefinition) -> responses.ResponseFormatTextJSONSchemaConfigParam:
         response_format_param: responses.ResponseFormatTextJSONSchemaConfigParam = {
@@ -1360,7 +1375,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                         vendor_part_id=chunk.item.id,
                         tool_name=chunk.item.name,
                         args=chunk.item.arguments,
-                        tool_call_id=chunk.item.call_id,
+                        tool_call_id=_combine_tool_call_ids(chunk.item.call_id, chunk.item.id),
                     )
                 elif isinstance(chunk.item, responses.ResponseReasoningItem):
                     pass
@@ -1501,3 +1516,17 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
         u.input_audio_tokens = response_usage.prompt_tokens_details.audio_tokens or 0
         u.cache_read_tokens = response_usage.prompt_tokens_details.cached_tokens or 0
     return u
+
+
+def _combine_tool_call_ids(call_id: str, id: str | None) -> str:
+    # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
+    # Our `ToolCallPart` has only the `call_id` field, so we combine the two fields into a single string.
+    return f'{call_id}|{id}' if id else call_id
+
+
+def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
+    if '|' in combined_id:
+        call_id, id = combined_id.split('|', 1)
+        return call_id, id
+    else:
+        return combined_id, None  # pragma: no cover
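These helpers round-trip the Responses API's call_id/id pair through the single ToolCallPart.tool_call_id string; for example (IDs are made up):

    combined = _combine_tool_call_ids('call_abc123', 'fc_def456')
    assert combined == 'call_abc123|fc_def456'
    assert _split_combined_tool_call_id(combined) == ('call_abc123', 'fc_def456')
    assert _split_combined_tool_call_id('call_xyz') == ('call_xyz', None)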
pydantic_ai/profiles/openai.py
@@ -41,6 +41,9 @@ class OpenAIModelProfile(ModelProfile):
     openai_chat_supports_web_search: bool = False
     """Whether the model supports web search in Chat Completions API."""

+    openai_supports_encrypted_reasoning_content: bool = False
+    """Whether the model supports including encrypted reasoning content in the response."""
+
     def __post_init__(self):  # pragma: no cover
         if not self.openai_supports_sampling_settings:
             warnings.warn(
@@ -84,6 +87,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
         openai_unsupported_model_settings=openai_unsupported_model_settings,
         openai_system_prompt_role=openai_system_prompt_role,
         openai_chat_supports_web_search=supports_web_search,
+        openai_supports_encrypted_reasoning_content=is_reasoning_model,
     )
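This flag is what drives the new include parameter in openai.py above; a rough sketch of the interaction, assuming 'o3-mini' is classified as a reasoning model:

    from pydantic_ai.profiles.openai import OpenAIModelProfile, openai_model_profile

    profile = OpenAIModelProfile.from_profile(openai_model_profile('o3-mini'))
    if profile.openai_supports_encrypted_reasoning_content:
        include = ['reasoning.encrypted_content']  # forwarded as client.responses.create(include=...)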
pydantic_ai/providers/__init__.py
@@ -47,6 +47,9 @@ class Provider(ABC, Generic[InterfaceClient]):
         """The model profile for the named model, if available."""
         return None  # pragma: no cover

+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}(name={self.name}, base_url={self.base_url})'
+

 def infer_provider_class(provider: str) -> type[Provider[Any]]:  # noqa: C901
     """Infers the provider class from the provider name."""
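With the new __repr__, provider instances print readably; for instance (key is a placeholder, output is illustrative):

    from pydantic_ai.providers.openai import OpenAIProvider

    provider = OpenAIProvider(api_key='sk-test')
    print(repr(provider))  # e.g. OpenAIProvider(name=openai, base_url=https://api.openai.com/v1/)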
pydantic_ai/providers/anthropic.py
@@ -45,12 +45,15 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
     def __init__(self, *, anthropic_client: AsyncAnthropicClient | None = None) -> None: ...

     @overload
-    def __init__(self, *, api_key: str | None = None, http_client: httpx.AsyncClient | None = None) -> None: ...
+    def __init__(
+        self, *, api_key: str | None = None, base_url: str | None = None, http_client: httpx.AsyncClient | None = None
+    ) -> None: ...

     def __init__(
         self,
         *,
         api_key: str | None = None,
+        base_url: str | None = None,
         anthropic_client: AsyncAnthropicClient | None = None,
         http_client: httpx.AsyncClient | None = None,
     ) -> None:
@@ -59,6 +62,7 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
         Args:
             api_key: The API key to use for authentication, if not provided, the `ANTHROPIC_API_KEY` environment variable
                 will be used if available.
+            base_url: The base URL to use for the Anthropic API.
             anthropic_client: An existing [`AsyncAnthropic`](https://github.com/anthropics/anthropic-sdk-python)
                 client to use. If provided, the `api_key` and `http_client` arguments will be ignored.
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
@@ -68,14 +72,14 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
             assert api_key is None, 'Cannot provide both `anthropic_client` and `api_key`'
             self._client = anthropic_client
         else:
-            api_key = api_key or os.environ.get('ANTHROPIC_API_KEY')
+            api_key = api_key or os.getenv('ANTHROPIC_API_KEY')
             if not api_key:
                 raise UserError(
                     'Set the `ANTHROPIC_API_KEY` environment variable or pass it via `AnthropicProvider(api_key=...)`'
                     'to use the Anthropic provider.'
                 )
             if http_client is not None:
-                self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
+                self._client = AsyncAnthropic(api_key=api_key, base_url=base_url, http_client=http_client)
             else:
                 http_client = cached_async_http_client(provider='anthropic')
-                self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
+                self._client = AsyncAnthropic(api_key=api_key, base_url=base_url, http_client=http_client)
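A brief sketch of the new base_url parameter (values are placeholders):

    from pydantic_ai.providers.anthropic import AnthropicProvider

    # Point the provider at any endpoint that speaks the Anthropic API.
    provider = AnthropicProvider(api_key='test-key', base_url='http://localhost:8787/anthropic')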
pydantic_ai/providers/cohere.py
@@ -60,14 +60,14 @@ class CohereProvider(Provider[AsyncClientV2]):
             assert api_key is None, 'Cannot provide both `cohere_client` and `api_key`'
             self._client = cohere_client
         else:
-            api_key = api_key or os.environ.get('CO_API_KEY')
+            api_key = api_key or os.getenv('CO_API_KEY')
             if not api_key:
                 raise UserError(
                     'Set the `CO_API_KEY` environment variable or pass it via `CohereProvider(api_key=...)`'
                     'to use the Cohere provider.'
                 )

-            base_url = os.environ.get('CO_BASE_URL')
+            base_url = os.getenv('CO_BASE_URL')
             if http_client is not None:
                 self._client = AsyncClientV2(api_key=api_key, httpx_client=http_client, base_url=base_url)
             else:
pydantic_ai/providers/gateway.py (new file)
@@ -0,0 +1,187 @@
+"""This module implements the Pydantic AI Gateway provider."""
+
+from __future__ import annotations as _annotations
+
+import os
+from typing import TYPE_CHECKING, Any, Literal, overload
+from urllib.parse import urljoin
+
+import httpx
+
+from pydantic_ai.exceptions import UserError
+from pydantic_ai.models import Model, cached_async_http_client, get_user_agent
+
+if TYPE_CHECKING:
+    from google.genai import Client as GoogleClient
+    from groq import AsyncGroq
+    from openai import AsyncOpenAI
+
+    from pydantic_ai.models.anthropic import AsyncAnthropicClient
+    from pydantic_ai.providers import Provider
+
+
+@overload
+def gateway_provider(
+    upstream_provider: Literal['openai', 'openai-chat', 'openai-responses'],
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+    http_client: httpx.AsyncClient | None = None,
+) -> Provider[AsyncOpenAI]: ...
+
+
+@overload
+def gateway_provider(
+    upstream_provider: Literal['groq'],
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+    http_client: httpx.AsyncClient | None = None,
+) -> Provider[AsyncGroq]: ...
+
+
+@overload
+def gateway_provider(
+    upstream_provider: Literal['google-vertex'],
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+) -> Provider[GoogleClient]: ...
+
+
+@overload
+def gateway_provider(
+    upstream_provider: Literal['anthropic'],
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+) -> Provider[AsyncAnthropicClient]: ...
+
+
+def gateway_provider(
+    upstream_provider: Literal['openai', 'openai-chat', 'openai-responses', 'groq', 'google-vertex', 'anthropic'] | str,
+    *,
+    # Every provider
+    api_key: str | None = None,
+    base_url: str | None = None,
+    # OpenAI & Groq
+    http_client: httpx.AsyncClient | None = None,
+) -> Provider[Any]:
+    """Create a new Gateway provider.
+
+    Args:
+        upstream_provider: The upstream provider to use.
+        api_key: The API key to use for authentication. If not provided, the `PYDANTIC_AI_GATEWAY_API_KEY`
+            environment variable will be used if available.
+        base_url: The base URL to use for the Gateway. If not provided, the `PYDANTIC_AI_GATEWAY_BASE_URL`
+            environment variable will be used if available. Otherwise, defaults to `http://localhost:8787/`.
+        http_client: The HTTP client to use for the Gateway.
+    """
+    api_key = api_key or os.getenv('PYDANTIC_AI_GATEWAY_API_KEY')
+    if not api_key:
+        raise UserError(
+            'Set the `PYDANTIC_AI_GATEWAY_API_KEY` environment variable or pass it via `gateway_provider(api_key=...)`'
+            ' to use the Pydantic AI Gateway provider.'
+        )
+
+    base_url = base_url or os.getenv('PYDANTIC_AI_GATEWAY_BASE_URL', 'http://localhost:8787')
+    http_client = http_client or cached_async_http_client(provider=f'gateway-{upstream_provider}')
+    http_client.event_hooks = {'request': [_request_hook]}
+
+    if upstream_provider in ('openai', 'openai-chat'):
+        from .openai import OpenAIProvider
+
+        return OpenAIProvider(api_key=api_key, base_url=urljoin(base_url, 'openai'), http_client=http_client)
+    elif upstream_provider == 'openai-responses':
+        from .openai import OpenAIProvider
+
+        return OpenAIProvider(api_key=api_key, base_url=urljoin(base_url, 'openai'), http_client=http_client)
+    elif upstream_provider == 'groq':
+        from .groq import GroqProvider
+
+        return GroqProvider(api_key=api_key, base_url=urljoin(base_url, 'groq'), http_client=http_client)
+    elif upstream_provider == 'anthropic':
+        from anthropic import AsyncAnthropic
+
+        from .anthropic import AnthropicProvider
+
+        return AnthropicProvider(
+            anthropic_client=AsyncAnthropic(
+                auth_token=api_key,
+                base_url=urljoin(base_url, 'anthropic'),
+                http_client=http_client,
+            )
+        )
+    elif upstream_provider == 'google-vertex':
+        from google.genai import Client as GoogleClient
+
+        from .google import GoogleProvider
+
+        return GoogleProvider(
+            client=GoogleClient(
+                vertexai=True,
+                api_key='unset',
+                http_options={
+                    'base_url': f'{base_url}/google-vertex',
+                    'headers': {'User-Agent': get_user_agent(), 'Authorization': api_key},
+                    # TODO(Marcelo): Until https://github.com/googleapis/python-genai/issues/1357 is solved.
+                    'async_client_args': {
+                        'transport': httpx.AsyncHTTPTransport(),
+                        'event_hooks': {'request': [_request_hook]},
+                    },
+                },
+            )
+        )
+    else:  # pragma: no cover
+        raise UserError(f'Unknown provider: {upstream_provider}')
+
+
+def infer_model(model_name: str) -> Model:
+    """Infer the model class that will be used to make requests to the gateway.
+
+    Args:
+        model_name: The name of the model to infer. Must be in the format "provider/model_name".
+
+    Returns:
+        The model class that will be used to make requests to the gateway.
+    """
+    try:
+        upstream_provider, model_name = model_name.split('/', 1)
+    except ValueError:
+        raise UserError(f'The model name "{model_name}" is not in the format "provider/model_name".')
+
+    if upstream_provider in ('openai', 'openai-chat'):
+        from pydantic_ai.models.openai import OpenAIChatModel
+
+        return OpenAIChatModel(model_name, provider=gateway_provider('openai'))
+    elif upstream_provider == 'openai-responses':
+        from pydantic_ai.models.openai import OpenAIResponsesModel
+
+        return OpenAIResponsesModel(model_name, provider=gateway_provider('openai'))
+    elif upstream_provider == 'groq':
+        from pydantic_ai.models.groq import GroqModel
+
+        return GroqModel(model_name, provider=gateway_provider('groq'))
+    elif upstream_provider == 'anthropic':
+        from pydantic_ai.models.anthropic import AnthropicModel
+
+        return AnthropicModel(model_name, provider=gateway_provider('anthropic'))
+    elif upstream_provider == 'google-vertex':
+        from pydantic_ai.models.google import GoogleModel
+
+        return GoogleModel(model_name, provider=gateway_provider('google-vertex'))
+    raise UserError(f'Unknown upstream provider: {upstream_provider}')
+
+
+async def _request_hook(request: httpx.Request) -> httpx.Request:
+    """Request hook for the gateway provider.
+
+    It adds the `"traceparent"` header to the request.
+    """
+    from opentelemetry.propagate import inject
+
+    headers: dict[str, Any] = {}
+    inject(headers)
+    request.headers.update(headers)
+
+    return request
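Besides the model-string route, the provider can be constructed directly; a short sketch based on the signatures above (key is a placeholder):

    from pydantic_ai.models.openai import OpenAIChatModel
    from pydantic_ai.providers.gateway import gateway_provider

    # Routes OpenAI-compatible requests through the gateway's /openai endpoint.
    provider = gateway_provider('openai', api_key='pag-test-key')
    model = OpenAIChatModel('gpt-4o', provider=provider)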
pydantic_ai/providers/google.py
@@ -106,13 +106,13 @@ class GoogleProvider(Provider[Client]):
         else:
             self._client = Client(
                 vertexai=vertexai,
-                project=project or os.environ.get('GOOGLE_CLOUD_PROJECT'),
+                project=project or os.getenv('GOOGLE_CLOUD_PROJECT'),
                 # From https://github.com/pydantic/pydantic-ai/pull/2031/files#r2169682149:
                 # Currently `us-central1` supports the most models by far of any region including `global`, but not
                 # all of them. `us-central1` has all google models but is missing some Anthropic partner models,
                 # which use `us-east5` instead. `global` has fewer models but higher availability.
                 # For more details, check: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions
-                location=location or os.environ.get('GOOGLE_CLOUD_LOCATION') or 'us-central1',
+                location=location or os.getenv('GOOGLE_CLOUD_LOCATION') or 'us-central1',
                 credentials=credentials,
                 http_options=http_options,
             )
pydantic_ai/providers/google_gla.py
@@ -39,7 +39,7 @@ class GoogleGLAProvider(Provider[httpx.AsyncClient]):
                 will be used if available.
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
         """
-        api_key = api_key or os.environ.get('GEMINI_API_KEY')
+        api_key = api_key or os.getenv('GEMINI_API_KEY')
        if not api_key:
            raise UserError(
                'Set the `GEMINI_API_KEY` environment variable or pass it via `GoogleGLAProvider(api_key=...)`'
pydantic_ai/providers/groq.py
@@ -53,7 +53,7 @@ class GroqProvider(Provider[AsyncGroq]):

     @property
     def base_url(self) -> str:
-        return 'https://api.groq.com'
+        return str(self.client.base_url)

     @property
     def client(self) -> AsyncGroq:
@@ -85,12 +85,15 @@ class GroqProvider(Provider[AsyncGroq]):
     def __init__(self, *, groq_client: AsyncGroq | None = None) -> None: ...

     @overload
-    def __init__(self, *, api_key: str | None = None, http_client: httpx.AsyncClient | None = None) -> None: ...
+    def __init__(
+        self, *, api_key: str | None = None, base_url: str | None = None, http_client: httpx.AsyncClient | None = None
+    ) -> None: ...

     def __init__(
         self,
         *,
         api_key: str | None = None,
+        base_url: str | None = None,
         groq_client: AsyncGroq | None = None,
         http_client: httpx.AsyncClient | None = None,
     ) -> None:
@@ -99,6 +102,8 @@ class GroqProvider(Provider[AsyncGroq]):
         Args:
             api_key: The API key to use for authentication, if not provided, the `GROQ_API_KEY` environment variable
                 will be used if available.
+            base_url: The base url for the Groq requests. If not provided, the `GROQ_BASE_URL` environment variable
+                will be used if available. Otherwise, defaults to Groq's base url.
             groq_client: An existing
                 [`AsyncGroq`](https://github.com/groq/groq-python?tab=readme-ov-file#async-usage)
                 client to use. If provided, `api_key` and `http_client` must be `None`.
@@ -107,9 +112,11 @@ class GroqProvider(Provider[AsyncGroq]):
         if groq_client is not None:
             assert http_client is None, 'Cannot provide both `groq_client` and `http_client`'
             assert api_key is None, 'Cannot provide both `groq_client` and `api_key`'
+            assert base_url is None, 'Cannot provide both `groq_client` and `base_url`'
             self._client = groq_client
         else:
-            api_key = api_key or os.environ.get('GROQ_API_KEY')
+            api_key = api_key or os.getenv('GROQ_API_KEY')
+            base_url = base_url or os.getenv('GROQ_BASE_URL', 'https://api.groq.com')

             if not api_key:
                 raise UserError(
@@ -117,7 +124,7 @@ class GroqProvider(Provider[AsyncGroq]):
                 'to use the Groq provider.'
             )
         elif http_client is not None:
-            self._client = AsyncGroq(base_url=
+            self._client = AsyncGroq(base_url=base_url, api_key=api_key, http_client=http_client)
         else:
             http_client = cached_async_http_client(provider='groq')
-            self._client = AsyncGroq(base_url=
+            self._client = AsyncGroq(base_url=base_url, api_key=api_key, http_client=http_client)
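A quick sketch of the new base_url parameter (values are placeholders):

    from pydantic_ai.providers.groq import GroqProvider

    # Point the Groq client at an alternative Groq-compatible endpoint.
    provider = GroqProvider(api_key='gsk-test', base_url='http://localhost:8787/groq')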
pydantic_ai/providers/heroku.py
@@ -65,14 +65,14 @@ class HerokuProvider(Provider[AsyncOpenAI]):
             assert api_key is None, 'Cannot provide both `openai_client` and `api_key`'
             self._client = openai_client
         else:
-            api_key = api_key or os.environ.get('HEROKU_INFERENCE_KEY')
+            api_key = api_key or os.getenv('HEROKU_INFERENCE_KEY')
             if not api_key:
                 raise UserError(
                     'Set the `HEROKU_INFERENCE_KEY` environment variable or pass it via `HerokuProvider(api_key=...)`'
                     'to use the Heroku provider.'
                 )

-            base_url = base_url or os.environ.get('HEROKU_INFERENCE_URL', 'https://us.inference.heroku.com')
+            base_url = base_url or os.getenv('HEROKU_INFERENCE_URL', 'https://us.inference.heroku.com')
             base_url = base_url.rstrip('/') + '/v1'

             if http_client is not None:
pydantic_ai/providers/huggingface.py
@@ -95,7 +95,7 @@ class HuggingFaceProvider(Provider[AsyncInferenceClient]):
                 defaults to "auto", which will select the first available provider for the model, the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
             If `base_url` is passed, then `provider_name` is not used.
         """
-        api_key = api_key or os.environ.get('HF_TOKEN')
+        api_key = api_key or os.getenv('HF_TOKEN')

         if api_key is None:
             raise UserError(
pydantic_ai/providers/mistral.py
@@ -67,7 +67,7 @@ class MistralProvider(Provider[Mistral]):
             assert base_url is None, 'Cannot provide both `mistral_client` and `base_url`'
             self._client = mistral_client
         else:
-            api_key = api_key or os.environ.get('MISTRAL_API_KEY')
+            api_key = api_key or os.getenv('MISTRAL_API_KEY')

             if not api_key:
                 raise UserError(
pydantic_ai/providers/openai.py
@@ -1,6 +1,7 @@
 from __future__ import annotations as _annotations

 import os
+from typing import overload

 import httpx

@@ -36,6 +37,18 @@ class OpenAIProvider(Provider[AsyncOpenAI]):
     def model_profile(self, model_name: str) -> ModelProfile | None:
         return openai_model_profile(model_name)

+    @overload
+    def __init__(self, *, openai_client: AsyncOpenAI) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        base_url: str | None = None,
+        api_key: str | None = None,
+        openai_client: None = None,
+        http_client: httpx.AsyncClient | None = None,
+    ) -> None: ...
+
     def __init__(
         self,
         base_url: str | None = None,
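The overloads document the two valid construction styles; for example (keys are placeholders):

    from openai import AsyncOpenAI
    from pydantic_ai.providers.openai import OpenAIProvider

    # Either pass a pre-configured client...
    provider = OpenAIProvider(openai_client=AsyncOpenAI(api_key='sk-test'))
    # ...or let the provider build one from base_url / api_key / http_client.
    provider = OpenAIProvider(api_key='sk-test')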