pydantic-ai-slim 0.1.9__tar.gz → 0.1.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-ai-slim might be problematic. Click here for more details.
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/PKG-INFO +3 -3
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/anthropic.py +4 -2
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/bedrock.py +1 -1
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/cohere.py +1 -1
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/gemini.py +27 -2
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/groq.py +4 -2
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/mistral.py +1 -1
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/openai.py +6 -2
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/settings.py +10 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/.gitignore +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/README.md +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/agent.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/__init__.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/_json_schema.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.1.9 → pydantic_ai_slim-0.1.10}/pyproject.toml +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: pydantic-ai-slim
|
|
3
|
-
Version: 0.1.9
|
|
3
|
+
Version: 0.1.10
|
|
4
4
|
Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
|
|
5
5
|
Author-email: Samuel Colvin <samuel@pydantic.dev>
|
|
6
6
|
License-Expression: MIT
|
|
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
|
|
|
29
29
|
Requires-Dist: griffe>=1.3.2
|
|
30
30
|
Requires-Dist: httpx>=0.27
|
|
31
31
|
Requires-Dist: opentelemetry-api>=1.28.0
|
|
32
|
-
Requires-Dist: pydantic-graph==0.1.9
|
|
32
|
+
Requires-Dist: pydantic-graph==0.1.10
|
|
33
33
|
Requires-Dist: pydantic>=2.10
|
|
34
34
|
Requires-Dist: typing-inspection>=0.4.0
|
|
35
35
|
Provides-Extra: anthropic
|
|
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
|
|
|
45
45
|
Provides-Extra: duckduckgo
|
|
46
46
|
Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
|
|
47
47
|
Provides-Extra: evals
|
|
48
|
-
Requires-Dist: pydantic-evals==0.1.9; extra == 'evals'
|
|
48
|
+
Requires-Dist: pydantic-evals==0.1.10; extra == 'evals'
|
|
49
49
|
Provides-Extra: groq
|
|
50
50
|
Requires-Dist: groq>=0.15.0; extra == 'groq'
|
|
51
51
|
Provides-Extra: logfire
|
|
@@ -90,7 +90,7 @@ See [the Anthropic docs](https://docs.anthropic.com/en/docs/about-claude/models)
|
|
|
90
90
|
"""
|
|
91
91
|
|
|
92
92
|
|
|
93
|
-
class AnthropicModelSettings(ModelSettings):
|
|
93
|
+
class AnthropicModelSettings(ModelSettings, total=False):
|
|
94
94
|
"""Settings used for an Anthropic model request.
|
|
95
95
|
|
|
96
96
|
ALL FIELDS MUST BE `anthropic_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
|
|
@@ -221,6 +221,8 @@ class AnthropicModel(Model):
|
|
|
221
221
|
system_prompt, anthropic_messages = await self._map_message(messages)
|
|
222
222
|
|
|
223
223
|
try:
|
|
224
|
+
extra_headers = model_settings.get('extra_headers', {})
|
|
225
|
+
extra_headers.setdefault('User-Agent', get_user_agent())
|
|
224
226
|
return await self.client.messages.create(
|
|
225
227
|
max_tokens=model_settings.get('max_tokens', 1024),
|
|
226
228
|
system=system_prompt or NOT_GIVEN,
|
|
@@ -234,7 +236,7 @@ class AnthropicModel(Model):
|
|
|
234
236
|
top_p=model_settings.get('top_p', NOT_GIVEN),
|
|
235
237
|
timeout=model_settings.get('timeout', NOT_GIVEN),
|
|
236
238
|
metadata=model_settings.get('anthropic_metadata', NOT_GIVEN),
|
|
237
|
-
extra_headers=model_settings.get('extra_headers'),
|
|
239
|
+
extra_headers=extra_headers,
|
|
238
240
|
extra_body=model_settings.get('extra_body'),
|
|
239
241
|
)
|
|
240
242
|
except APIStatusError as e:
|
|
@@ -355,7 +355,7 @@ class BedrockConverseModel(Model):
|
|
|
355
355
|
|
|
356
356
|
if max_tokens := model_settings.get('max_tokens'):
|
|
357
357
|
inference_config['maxTokens'] = max_tokens
|
|
358
|
-
if temperature := model_settings.get('temperature'):
|
|
358
|
+
if (temperature := model_settings.get('temperature')) is not None:
|
|
359
359
|
inference_config['temperature'] = temperature
|
|
360
360
|
if top_p := model_settings.get('top_p'):
|
|
361
361
|
inference_config['topP'] = top_p
|
|
@@ -78,7 +78,7 @@ See [Cohere's docs](https://docs.cohere.com/v2/docs/models) for a list of all av
|
|
|
78
78
|
"""
|
|
79
79
|
|
|
80
80
|
|
|
81
|
-
class CohereModelSettings(ModelSettings):
|
|
81
|
+
class CohereModelSettings(ModelSettings, total=False):
|
|
82
82
|
"""Settings used for a Cohere model request.
|
|
83
83
|
|
|
84
84
|
ALL FIELDS MUST BE `cohere_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
|
|
@@ -73,7 +73,7 @@ See [the Gemini API docs](https://ai.google.dev/gemini-api/docs/models/gemini#mo
|
|
|
73
73
|
"""
|
|
74
74
|
|
|
75
75
|
|
|
76
|
-
class GeminiModelSettings(ModelSettings):
|
|
76
|
+
class GeminiModelSettings(ModelSettings, total=False):
|
|
77
77
|
"""Settings used for a Gemini model request.
|
|
78
78
|
|
|
79
79
|
ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
|
|
@@ -81,6 +81,18 @@ class GeminiModelSettings(ModelSettings):
|
|
|
81
81
|
|
|
82
82
|
gemini_safety_settings: list[GeminiSafetySettings]
|
|
83
83
|
|
|
84
|
+
gemini_thinking_config: ThinkingConfig
|
|
85
|
+
"""Thinking is "on" by default in both the API and AI Studio.
|
|
86
|
+
|
|
87
|
+
Being on by default doesn't mean the model will send back thoughts. For that, you would need to set `include_thoughts`
|
|
88
|
+
to `True`, but since end of January 2025, `thoughts` are not returned anymore, and are only displayed in the Google
|
|
89
|
+
AI Studio. See https://discuss.ai.google.dev/t/thoughts-are-missing-cot-not-included-anymore/63653 for more details.
|
|
90
|
+
|
|
91
|
+
If you want to avoid the model spending any tokens on thinking, you can set `thinking_budget` to `0`.
|
|
92
|
+
|
|
93
|
+
See more about it on <https://ai.google.dev/gemini-api/docs/thinking>.
|
|
94
|
+
"""
|
|
95
|
+
|
|
84
96
|
|
|
85
97
|
@dataclass(init=False)
|
|
86
98
|
class GeminiModel(Model):
|
|
@@ -223,7 +235,9 @@ class GeminiModel(Model):
|
|
|
223
235
|
generation_config['presence_penalty'] = presence_penalty
|
|
224
236
|
if (frequency_penalty := model_settings.get('frequency_penalty')) is not None:
|
|
225
237
|
generation_config['frequency_penalty'] = frequency_penalty
|
|
226
|
-
if (
|
|
238
|
+
if (thinkingConfig := model_settings.get('gemini_thinking_config')) is not None:
|
|
239
|
+
generation_config['thinking_config'] = thinkingConfig # pragma: no cover
|
|
240
|
+
if (gemini_safety_settings := model_settings.get('gemini_safety_settings')) is not None:
|
|
227
241
|
request_data['safetySettings'] = gemini_safety_settings
|
|
228
242
|
if generation_config:
|
|
229
243
|
request_data['generationConfig'] = generation_config
|
|
@@ -497,6 +511,16 @@ class GeminiSafetySettings(TypedDict):
|
|
|
497
511
|
"""
|
|
498
512
|
|
|
499
513
|
|
|
514
|
+
class ThinkingConfig(TypedDict, total=False):
|
|
515
|
+
"""The thinking features configuration."""
|
|
516
|
+
|
|
517
|
+
include_thoughts: Annotated[bool, pydantic.Field(alias='includeThoughts')]
|
|
518
|
+
"""Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available."""
|
|
519
|
+
|
|
520
|
+
thinking_budget: Annotated[int, pydantic.Field(alias='thinkingBudget')]
|
|
521
|
+
"""Indicates the thinking budget in tokens."""
|
|
522
|
+
|
|
523
|
+
|
|
500
524
|
class _GeminiGenerationConfig(TypedDict, total=False):
|
|
501
525
|
"""Schema for an API request to the Gemini API.
|
|
502
526
|
|
|
@@ -511,6 +535,7 @@ class _GeminiGenerationConfig(TypedDict, total=False):
|
|
|
511
535
|
presence_penalty: float
|
|
512
536
|
frequency_penalty: float
|
|
513
537
|
stop_sequences: list[str]
|
|
538
|
+
thinking_config: ThinkingConfig
|
|
514
539
|
|
|
515
540
|
|
|
516
541
|
class _GeminiContent(TypedDict):
|
|
@@ -82,7 +82,7 @@ See <https://console.groq.com/docs/models> for an up to date date list of models
|
|
|
82
82
|
"""
|
|
83
83
|
|
|
84
84
|
|
|
85
|
-
class GroqModelSettings(ModelSettings):
|
|
85
|
+
class GroqModelSettings(ModelSettings, total=False):
|
|
86
86
|
"""Settings used for a Groq model request.
|
|
87
87
|
|
|
88
88
|
ALL FIELDS MUST BE `groq_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
|
|
@@ -200,6 +200,8 @@ class GroqModel(Model):
|
|
|
200
200
|
groq_messages = self._map_messages(messages)
|
|
201
201
|
|
|
202
202
|
try:
|
|
203
|
+
extra_headers = model_settings.get('extra_headers', {})
|
|
204
|
+
extra_headers.setdefault('User-Agent', get_user_agent())
|
|
203
205
|
return await self.client.chat.completions.create(
|
|
204
206
|
model=str(self._model_name),
|
|
205
207
|
messages=groq_messages,
|
|
@@ -217,7 +219,7 @@ class GroqModel(Model):
|
|
|
217
219
|
presence_penalty=model_settings.get('presence_penalty', NOT_GIVEN),
|
|
218
220
|
frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
|
|
219
221
|
logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
|
|
220
|
-
extra_headers=model_settings.get('extra_headers'),
|
|
222
|
+
extra_headers=extra_headers,
|
|
221
223
|
extra_body=model_settings.get('extra_body'),
|
|
222
224
|
)
|
|
223
225
|
except APIStatusError as e:
|
|
@@ -91,7 +91,7 @@ Since [the Mistral docs](https://docs.mistral.ai/getting-started/models/models_o
|
|
|
91
91
|
"""
|
|
92
92
|
|
|
93
93
|
|
|
94
|
-
class MistralModelSettings(ModelSettings):
|
|
94
|
+
class MistralModelSettings(ModelSettings, total=False):
|
|
95
95
|
"""Settings used for a Mistral model request.
|
|
96
96
|
|
|
97
97
|
ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
|
|
@@ -264,6 +264,8 @@ class OpenAIModel(Model):
|
|
|
264
264
|
openai_messages = await self._map_messages(messages)
|
|
265
265
|
|
|
266
266
|
try:
|
|
267
|
+
extra_headers = model_settings.get('extra_headers', {})
|
|
268
|
+
extra_headers.setdefault('User-Agent', get_user_agent())
|
|
267
269
|
return await self.client.chat.completions.create(
|
|
268
270
|
model=self._model_name,
|
|
269
271
|
messages=openai_messages,
|
|
@@ -284,7 +286,7 @@ class OpenAIModel(Model):
|
|
|
284
286
|
logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
|
|
285
287
|
reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
|
|
286
288
|
user=model_settings.get('openai_user', NOT_GIVEN),
|
|
287
|
-
extra_headers=model_settings.get('extra_headers'),
|
|
289
|
+
extra_headers=extra_headers,
|
|
288
290
|
extra_body=model_settings.get('extra_body'),
|
|
289
291
|
)
|
|
290
292
|
except APIStatusError as e:
|
|
@@ -610,6 +612,8 @@ class OpenAIResponsesModel(Model):
|
|
|
610
612
|
reasoning = self._get_reasoning(model_settings)
|
|
611
613
|
|
|
612
614
|
try:
|
|
615
|
+
extra_headers = model_settings.get('extra_headers', {})
|
|
616
|
+
extra_headers.setdefault('User-Agent', get_user_agent())
|
|
613
617
|
return await self.client.responses.create(
|
|
614
618
|
input=openai_messages,
|
|
615
619
|
model=self._model_name,
|
|
@@ -625,7 +629,7 @@ class OpenAIResponsesModel(Model):
|
|
|
625
629
|
timeout=model_settings.get('timeout', NOT_GIVEN),
|
|
626
630
|
reasoning=reasoning,
|
|
627
631
|
user=model_settings.get('openai_user', NOT_GIVEN),
|
|
628
|
-
extra_headers=model_settings.get('extra_headers'),
|
|
632
|
+
extra_headers=extra_headers,
|
|
629
633
|
extra_body=model_settings.get('extra_body'),
|
|
630
634
|
)
|
|
631
635
|
except APIStatusError as e:
|
|
@@ -141,6 +141,16 @@ class ModelSettings(TypedDict, total=False):
|
|
|
141
141
|
* Cohere
|
|
142
142
|
"""
|
|
143
143
|
|
|
144
|
+
extra_headers: dict[str, str]
|
|
145
|
+
"""Extra headers to send to the model.
|
|
146
|
+
|
|
147
|
+
Supported by:
|
|
148
|
+
|
|
149
|
+
* OpenAI
|
|
150
|
+
* Anthropic
|
|
151
|
+
* Groq
|
|
152
|
+
"""
|
|
153
|
+
|
|
144
154
|
extra_body: object
|
|
145
155
|
"""Extra body to send to the model.
|
|
146
156
|
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|