pydantic-ai-slim 0.1.7__tar.gz → 0.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/PKG-INFO +3 -3
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/_agent_graph.py +24 -2
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/messages.py +3 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/mistral.py +14 -1
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/.gitignore +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/README.md +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/agent.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/__init__.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/_json_schema.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/gemini.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/openai.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.1.7 → pydantic_ai_slim-0.1.8}/pyproject.toml +0 -0
PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.1.7
+Version: 0.1.8
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.1.7
+Requires-Dist: pydantic-graph==0.1.8
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.1.7; extra == 'evals'
+Requires-Dist: pydantic-evals==0.1.8; extra == 'evals'
 Provides-Extra: groq
 Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
```
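Note that `pydantic-graph` (and `pydantic-evals`, behind the `evals` extra) are pinned with `==` to the exact release version, so the package family moves in lockstep. A minimal stdlib sketch, not part of the package, to confirm an environment honours those pins; the distribution list is an assumption based on the pins above:

```python
# Sketch: check that the lockstep-pinned distributions agree on a version.
from importlib.metadata import PackageNotFoundError, version


def check_lockstep(dists: list[str]) -> None:
    """Print each installed version and flag any mismatch."""
    found: dict[str, str] = {}
    for dist in dists:
        try:
            found[dist] = version(dist)
        except PackageNotFoundError:
            print(f'{dist}: not installed')
    if len(set(found.values())) > 1:
        print(f'version mismatch: {found}')
    else:
        print(f'all in lockstep: {found}')


# Assumed set, taken from the pins in this diff.
check_lockstep(['pydantic-ai-slim', 'pydantic-graph', 'pydantic-evals'])
```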
pydantic_ai/_agent_graph.py

```diff
@@ -546,7 +546,7 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT
     )
 
 
-async def process_function_tools(
+async def process_function_tools(  # noqa: C901
     tool_calls: list[_messages.ToolCallPart],
     output_tool_name: str | None,
     output_tool_call_id: str | None,
@@ -632,6 +632,8 @@ async def process_function_tools(
     if not calls_to_run:
         return
 
+    user_parts: list[_messages.UserPromptPart] = []
+
     # Run all tool tasks in parallel
     results_by_index: dict[int, _messages.ModelRequestPart] = {}
     with ctx.deps.tracer.start_as_current_span(
@@ -645,6 +647,9 @@ async def process_function_tools(
             asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer), name=call.tool_name)
             for tool, call in calls_to_run
         ]
+
+        file_index = 1
+
         pending = tasks
         while pending:
             done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
@@ -652,7 +657,22 @@ async def process_function_tools(
                 index = tasks.index(task)
                 result = task.result()
                 yield _messages.FunctionToolResultEvent(result, tool_call_id=call_index_to_event_id[index])
-                if isinstance(result, (_messages.ToolReturnPart, _messages.RetryPromptPart)):
+
+                if isinstance(result, _messages.RetryPromptPart):
+                    results_by_index[index] = result
+                elif isinstance(result, _messages.ToolReturnPart):
+                    if isinstance(result.content, _messages.MultiModalContentTypes):
+                        user_parts.append(
+                            _messages.UserPromptPart(
+                                content=[f'This is file {file_index}:', result.content],
+                                timestamp=result.timestamp,
+                                part_kind='user-prompt',
+                            )
+                        )
+
+                        result.content = f'See file {file_index}.'
+                        file_index += 1
+
                     results_by_index[index] = result
                 else:
                     assert_never(result)
@@ -662,6 +682,8 @@ async def process_function_tools(
     for k in sorted(results_by_index):
        output_parts.append(results_by_index[k])
 
+    output_parts.extend(user_parts)
+
 
 async def _tool_from_mcp_server(
     tool_name: str,
```
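This is the substantive change in the release: when a function tool returns multimodal content, the binary payload is moved out of the tool-return message into a follow-up user prompt part labelled `This is file N:`, and the tool result itself becomes the pointer `See file N.`. Tool results stay textual, which is what most chat APIs expect, while the file still reaches the model. A self-contained sketch of that transformation, using hypothetical stand-in classes rather than pydantic_ai's real message types:

```python
# Sketch only: BinaryContent, ToolReturnPart and UserPromptPart here are
# simplified stand-ins, not the real pydantic_ai.messages classes.
from dataclasses import dataclass


@dataclass
class BinaryContent:  # stand-in for a multimodal payload
    data: bytes
    media_type: str


@dataclass
class ToolReturnPart:  # stand-in: what a tool call produced
    tool_name: str
    content: object


@dataclass
class UserPromptPart:  # stand-in: extra content sent as the user
    content: list[object]


def split_multimodal(results: list[ToolReturnPart]) -> tuple[list[ToolReturnPart], list[UserPromptPart]]:
    """Move multimodal tool output into user parts, leaving a textual pointer behind."""
    user_parts: list[UserPromptPart] = []
    file_index = 1
    for result in results:
        if isinstance(result.content, BinaryContent):
            user_parts.append(UserPromptPart(content=[f'This is file {file_index}:', result.content]))
            result.content = f'See file {file_index}.'
            file_index += 1
    return results, user_parts


returns = [ToolReturnPart('take_screenshot', BinaryContent(b'\x89PNG...', 'image/png'))]
tool_parts, user_parts = split_multimodal(returns)
print(tool_parts[0].content)     # 'See file 1.'
print(user_parts[0].content[0])  # 'This is file 1:'
```

In the real code the user parts are collected while tool tasks complete and appended after all tool results (`output_parts.extend(user_parts)`), so ordering between tool returns and their files is preserved.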
pydantic_ai/messages.py

```diff
@@ -253,6 +253,9 @@ class BinaryContent:
 
 UserContent: TypeAlias = 'str | ImageUrl | AudioUrl | DocumentUrl | VideoUrl | BinaryContent'
 
+# Ideally this would be a Union of types, but Python 3.9 requires it to be a string, and strings don't work with `isinstance`.
+MultiModalContentTypes = (ImageUrl, AudioUrl, DocumentUrl, VideoUrl, BinaryContent)
+
 
 def _document_format(media_type: str) -> DocumentFormat:
     if media_type == 'application/pdf':
```
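The comment captures the design constraint: `UserContent` is a string type alias (needed for Python 3.9 compatibility), and strings cannot be passed to `isinstance`, so a parallel runtime tuple of classes is defined for the check used in `_agent_graph.py`. A small sketch of the pattern with hypothetical classes:

```python
# Sketch with dummy classes, not the real pydantic_ai types.
class ImageUrl: ...
class BinaryContent: ...


# For type checkers only; passing this string to isinstance() raises TypeError.
UserContent = 'str | ImageUrl | BinaryContent'

# For runtime checks: isinstance() accepts a tuple of classes.
MultiModalContentTypes = (ImageUrl, BinaryContent)

print(isinstance(ImageUrl(), MultiModalContentTypes))    # True
print(isinstance('plain text', MultiModalContentTypes))  # False
```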
pydantic_ai/models/mistral.py

```diff
@@ -483,7 +483,20 @@ class MistralModel(Model):
                 assert_never(message)
         if instructions := self._get_instructions(messages):
             mistral_messages.insert(0, MistralSystemMessage(content=instructions))
-        return mistral_messages
+
+        # Post-process messages to insert fake assistant message after tool message if followed by user message
+        # to work around `Unexpected role 'user' after role 'tool'` error.
+        processed_messages: list[MistralMessages] = []
+        for i, current_message in enumerate(mistral_messages):
+            processed_messages.append(current_message)
+
+            if isinstance(current_message, MistralToolMessage) and i + 1 < len(mistral_messages):
+                next_message = mistral_messages[i + 1]
+                if isinstance(next_message, MistralUserMessage):
+                    # Insert a dummy assistant message
+                    processed_messages.append(MistralAssistantMessage(content=[MistralTextChunk(text='OK')]))
+
+        return processed_messages
 
     def _map_user_prompt(self, part: UserPromptPart) -> MistralUserMessage:
         content: str | list[MistralContentChunk]
```
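The Mistral fix works around the API rejecting a `user` message that directly follows a `tool` message (`Unexpected role 'user' after role 'tool'`) by splicing in a dummy `assistant` turn; this matters now that multimodal tool results produce exactly that tool-then-user sequence. A sketch of the same pass over plain role/content dicts (a hypothetical representation; the real code operates on the Mistral SDK's message classes):

```python
# Sketch: insert a dummy assistant turn wherever a tool message is
# immediately followed by a user message.
def insert_dummy_assistant(messages: list[dict]) -> list[dict]:
    processed: list[dict] = []
    for i, message in enumerate(messages):
        processed.append(message)
        if (
            message['role'] == 'tool'
            and i + 1 < len(messages)
            and messages[i + 1]['role'] == 'user'
        ):
            processed.append({'role': 'assistant', 'content': 'OK'})
    return processed


history = [
    {'role': 'user', 'content': 'What is 2+2?'},
    {'role': 'assistant', 'content': '', 'tool_calls': ['calc']},
    {'role': 'tool', 'content': '4'},
    {'role': 'user', 'content': 'Thanks, and 3+3?'},
]
for m in insert_dummy_assistant(history):
    print(m['role'])
# user, assistant, tool, assistant (the dummy 'OK'), user
```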