pydantic-ai-slim 0.1.3__tar.gz → 0.1.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydantic-ai-slim might be problematic.
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/PKG-INFO +6 -6
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_utils.py +1 -10
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/agent.py +15 -16
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/mcp.py +28 -1
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/__init__.py +6 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/gemini.py +1 -1
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/openai.py +25 -20
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pyproject.toml +3 -3
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/.gitignore +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/README.md +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/_json_schema.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/usage.py +0 -0
{pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/PKG-INFO +6 -6

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.1.3
+Version: 0.1.5
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.1.3
+Requires-Dist: pydantic-graph==0.1.5
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
@@ -45,22 +45,22 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.1.3; extra == 'evals'
+Requires-Dist: pydantic-evals==0.1.5; extra == 'evals'
 Provides-Extra: groq
 Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.11.0; extra == 'logfire'
 Provides-Extra: mcp
-Requires-Dist: mcp>=1.…
+Requires-Dist: mcp>=1.6.0; (python_version >= '3.10') and extra == 'mcp'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
 Provides-Extra: openai
-Requires-Dist: openai>=1.…
+Requires-Dist: openai>=1.75.0; extra == 'openai'
 Provides-Extra: tavily
 Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
 Provides-Extra: vertexai
 Requires-Dist: google-auth>=2.36.0; extra == 'vertexai'
-Requires-Dist: requests>=2.32.…
+Requires-Dist: requests>=2.32.2; extra == 'vertexai'
 Description-Content-Type: text/markdown
 
 # PydanticAI Slim
```
{pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/_utils.py +1 -10

```diff
@@ -291,13 +291,4 @@ class PeekableAsyncStream(Generic[T]):
 
 
 def get_traceparent(x: AgentRun | AgentRunResult | GraphRun | GraphRunResult) -> str:
-
-    import logfire_api
-    from logfire.experimental.annotations import get_traceparent
-
-    span: AbstractSpan | None = x._span(required=False)  # type: ignore[reportPrivateUsage]
-    if not span:  # pragma: no cover
-        return ''
-    if isinstance(span, logfire_api.LogfireSpan):  # pragma: no cover
-        assert isinstance(span, logfire.LogfireSpan)
-    return get_traceparent(span)
+    return x._traceparent(required=False) or ''  # type: ignore[reportPrivateUsage]
```
{pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/agent.py +15 -16

```diff
@@ -27,7 +27,6 @@ from . import (
     result,
     usage as _usage,
 )
-from ._utils import AbstractSpan
 from .models.instrumented import InstrumentationSettings, InstrumentedModel
 from .result import FinalResult, OutputDataT, StreamedRunResult, ToolOutput
 from .settings import ModelSettings, merge_model_settings
@@ -1683,14 +1682,14 @@ class AgentRun(Generic[AgentDepsT, OutputDataT]):
     ]
 
     @overload
-    def _span(self, *, required: Literal[False]) -> AbstractSpan | None: ...
+    def _traceparent(self, *, required: Literal[False]) -> str | None: ...
     @overload
-    def _span(self) -> AbstractSpan: ...
-    def _span(self, *, required: bool = True) -> AbstractSpan | None:
-        span = self._graph_run._span(required=False)  # type: ignore[reportPrivateUsage]
-        if span is None and required:  # pragma: no cover
-            raise AttributeError('…
-        return span
+    def _traceparent(self) -> str: ...
+    def _traceparent(self, *, required: bool = True) -> str | None:
+        traceparent = self._graph_run._traceparent(required=False)  # type: ignore[reportPrivateUsage]
+        if traceparent is None and required:  # pragma: no cover
+            raise AttributeError('No span was created for this agent run')
+        return traceparent
 
     @property
     def ctx(self) -> GraphRunContext[_agent_graph.GraphAgentState, _agent_graph.GraphAgentDeps[AgentDepsT, Any]]:
@@ -1729,7 +1728,7 @@ class AgentRun(Generic[AgentDepsT, OutputDataT]):
                 graph_run_result.output.tool_name,
                 graph_run_result.state,
                 self._graph_run.deps.new_message_index,
-                self._span(required=False),
+                self._traceparent(required=False),
             )
 
     def __aiter__(
@@ -1847,16 +1846,16 @@ class AgentRunResult(Generic[OutputDataT]):
     _output_tool_name: str | None = dataclasses.field(repr=False)
     _state: _agent_graph.GraphAgentState = dataclasses.field(repr=False)
     _new_message_index: int = dataclasses.field(repr=False)
-    …
+    _traceparent_value: str | None = dataclasses.field(repr=False)
 
     @overload
-    def _span(self, *, required: Literal[False]) -> AbstractSpan | None: ...
+    def _traceparent(self, *, required: Literal[False]) -> str | None: ...
     @overload
-    def _span(self) -> AbstractSpan: ...
-    def _span(self, *, required: bool = True) -> AbstractSpan | None:
-        if self.…
-        raise AttributeError('…
-        return self.…
+    def _traceparent(self) -> str: ...
+    def _traceparent(self, *, required: bool = True) -> str | None:
+        if self._traceparent_value is None and required:  # pragma: no cover
+            raise AttributeError('No span was created for this agent run')
+        return self._traceparent_value
 
     @property
     @deprecated('`result.data` is deprecated, use `result.output` instead.')
```
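Taken together, the agent changes replace the span-object plumbing with a plain W3C traceparent string: `AgentRun` and `AgentRunResult` now expose a private `_traceparent` accessor, and `get_traceparent` in `_utils.py` collapses to a one-liner with no `logfire` import. A minimal sketch of the resulting behaviour, using `TestModel` so no API key is needed (note that `_utils` is a private module, so this import path is an implementation detail rather than a public API):

```python
from pydantic_ai import Agent
from pydantic_ai._utils import get_traceparent  # private helper shown in the diff above
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel())
result = agent.run_sync('hello')

# With no instrumentation configured no span is created, so
# `x._traceparent(required=False)` returns None and the helper yields ''.
print(repr(get_traceparent(result)))
```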
{pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/mcp.py +28 -1

```diff
@@ -9,7 +9,7 @@ from types import TracebackType
 from typing import Any
 
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
-from mcp.types import JSONRPCMessage
+from mcp.types import JSONRPCMessage, LoggingLevel
 from typing_extensions import Self
 
 from pydantic_ai.tools import ToolDefinition
@@ -52,6 +52,11 @@ class MCPServer(ABC):
         raise NotImplementedError('MCP Server subclasses must implement this method.')
         yield
 
+    @abstractmethod
+    def _get_log_level(self) -> LoggingLevel | None:
+        """Get the log level for the MCP server."""
+        raise NotImplementedError('MCP Server subclasses must implement this method.')
+
     async def list_tools(self) -> list[ToolDefinition]:
         """Retrieve tools that are currently active on the server.
 
@@ -89,6 +94,8 @@ class MCPServer(ABC):
         self._client = await self._exit_stack.enter_async_context(client)
 
         await self._client.initialize()
+        if log_level := self._get_log_level():
+            await self._client.set_logging_level(log_level)
         self.is_running = True
         return self
 
@@ -150,6 +157,13 @@ class MCPServerStdio(MCPServer):
    By default the subprocess will not inherit any environment variables from the parent process.
    If you want to inherit the environment variables from the parent process, use `env=os.environ`.
    """
+    log_level: LoggingLevel | None = None
+    """The log level to set when connecting to the server, if any.
+
+    See <https://modelcontextprotocol.io/specification/2025-03-26/server/utilities/logging#logging> for more details.
+
+    If `None`, no log level will be set.
+    """
 
     cwd: str | Path | None = None
     """The working directory to use when spawning the process."""
@@ -164,6 +178,9 @@ class MCPServerStdio(MCPServer):
         async with stdio_client(server=server) as (read_stream, write_stream):
             yield read_stream, write_stream
 
+    def _get_log_level(self) -> LoggingLevel | None:
+        return self.log_level
+
 
 @dataclass
 class MCPServerHTTP(MCPServer):
@@ -223,6 +240,13 @@ class MCPServerHTTP(MCPServer):
     If no new messages are received within this time, the connection will be considered stale
     and may be closed. Defaults to 5 minutes (300 seconds).
     """
+    log_level: LoggingLevel | None = None
+    """The log level to set when connecting to the server, if any.
+
+    See <https://modelcontextprotocol.io/specification/2025-03-26/server/utilities/logging#logging> for more details.
+
+    If `None`, no log level will be set.
+    """
 
     @asynccontextmanager
     async def client_streams(
@@ -234,3 +258,6 @@ class MCPServerHTTP(MCPServer):
             url=self.url, headers=self.headers, timeout=self.timeout, sse_read_timeout=self.sse_read_timeout
         ) as (read_stream, write_stream):
             yield read_stream, write_stream
+
+    def _get_log_level(self) -> LoggingLevel | None:
+        return self.log_level
```
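The net effect for MCP users: both `MCPServerStdio` and `MCPServerHTTP` gain a `log_level` field, and `__aenter__` forwards it to the client via `set_logging_level` immediately after `initialize()`. A hedged usage sketch (the server command and module are placeholders, not a real server):

```python
import asyncio

from pydantic_ai import Agent
from pydantic_ai.mcp import MCPServerStdio

server = MCPServerStdio(
    'python',
    args=['-m', 'my_mcp_server'],  # hypothetical MCP server module
    log_level='debug',  # sent via set_logging_level() once the client initializes
)
agent = Agent('openai:gpt-4o', mcp_servers=[server])


async def main() -> None:
    async with agent.run_mcp_servers():
        result = await agent.run('Which tools do you have available?')
        print(result.output)


asyncio.run(main())
```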
{pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/__init__.py +6 -0

```diff
@@ -194,6 +194,8 @@ KnownModelName = TypeAliasType(
         'o1-mini-2024-09-12',
         'o1-preview',
         'o1-preview-2024-09-12',
+        'o3',
+        'o3-2025-04-16',
         'o3-mini',
         'o3-mini-2025-01-31',
         'openai:chatgpt-4o-latest',
@@ -243,8 +245,12 @@ KnownModelName = TypeAliasType(
         'openai:o1-mini-2024-09-12',
         'openai:o1-preview',
         'openai:o1-preview-2024-09-12',
+        'openai:o3',
+        'openai:o3-2025-04-16',
         'openai:o3-mini',
         'openai:o3-mini-2025-01-31',
+        'openai:o4-mini',
+        'openai:o4-mini-2025-04-16',
         'test',
     ],
 )
```
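With these additions, the o3 and o4-mini families type-check as `KnownModelName` values and can be used with the usual `provider:model` shorthand, for example (assuming `OPENAI_API_KEY` is set in the environment):

```python
from pydantic_ai import Agent

# 'openai:o3', 'openai:o3-2025-04-16', 'openai:o4-mini' and
# 'openai:o4-mini-2025-04-16' are now recognised model names.
agent = Agent('openai:o4-mini')
result = agent.run_sync('What is 2 + 2?')
print(result.output)
```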
{pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/gemini.py +1 -1

```diff
@@ -641,7 +641,7 @@ class _GeminiTextContent(TypedDict):
 
 
 class _GeminiTools(TypedDict):
-    function_declarations: list[_GeminiFunction]
+    function_declarations: Annotated[list[_GeminiFunction], pydantic.Field(alias='functionDeclarations')]
 
 
 class _GeminiFunction(TypedDict):
```
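This fixes the JSON key sent to Gemini: the Python attribute stays `function_declarations`, while the `pydantic.Field` alias makes validation and serialization use the camelCase `functionDeclarations` the API expects. A minimal sketch of the mechanism with stand-in TypedDicts (not the real `_GeminiTools` definition):

```python
from typing import Annotated, TypedDict

import pydantic


class Function(TypedDict):
    name: str


class Tools(TypedDict):
    function_declarations: Annotated[list[Function], pydantic.Field(alias='functionDeclarations')]


ta = pydantic.TypeAdapter(Tools)

# Validation accepts the camelCase wire name...
tools = ta.validate_python({'functionDeclarations': [{'name': 'get_weather'}]})

# ...and serializing by alias emits it again.
print(ta.dump_python(tools, by_alias=True))
#> {'functionDeclarations': [{'name': 'get_weather'}]}
```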
{pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pydantic_ai/models/openai.py +25 -20

```diff
@@ -57,6 +57,7 @@ try:
     )
     from openai.types.chat.chat_completion_content_part_image_param import ImageURL
     from openai.types.chat.chat_completion_content_part_input_audio_param import InputAudio
+    from openai.types.chat.chat_completion_content_part_param import File, FileFile
    from openai.types.responses import ComputerToolParam, FileSearchToolParam, WebSearchToolParam
    from openai.types.responses.response_input_param import FunctionCallOutput, Message
    from openai.types.shared import ReasoningEffort
@@ -426,6 +427,16 @@ class OpenAIModel(Model):
                     assert item.format in ('wav', 'mp3')
                     audio = InputAudio(data=base64_encoded, format=item.format)
                     content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
+                elif item.is_document:
+                    content.append(
+                        File(
+                            file=FileFile(
+                                file_data=f'data:{item.media_type};base64,{base64_encoded}',
+                                filename=f'filename.{item.format}',
+                            ),
+                            type='file',
+                        )
+                    )
                 else:  # pragma: no cover
                     raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
             elif isinstance(item, AudioUrl):  # pragma: no cover
```
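The new `is_document` branch means binary documents such as PDFs passed as `BinaryContent` are now sent to OpenAI chat models as `file` content parts (a data URI plus a synthetic filename derived from the format). A sketch of the user-facing call this enables (the file path is a placeholder):

```python
from pathlib import Path

from pydantic_ai import Agent, BinaryContent

agent = Agent('openai:gpt-4o')
pdf_bytes = Path('report.pdf').read_bytes()  # placeholder path
result = agent.run_sync(
    [
        'Summarize this document.',
        BinaryContent(data=pdf_bytes, media_type='application/pdf'),
    ]
)
print(result.output)
```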
```diff
@@ -435,25 +446,18 @@ class OpenAIModel(Model):
                 base64_encoded = base64.b64encode(response.content).decode('utf-8')
                 audio = InputAudio(data=base64_encoded, format=response.headers.get('content-type'))
                 content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
-            elif isinstance(item, DocumentUrl):
-                …
-                # response = await client.get(item.url)
-                # response.raise_for_status()
-                # base64_encoded = base64.b64encode(response.content).decode('utf-8')
-                # media_type = response.headers.get('content-type').split(';')[0]
-                # file_data = f'data:{media_type};base64,{base64_encoded}'
-                # file = File(file={'file_data': file_data, 'file_name': item.url, 'file_id': item.url}, type='file')
-                # content.append(file)
+            elif isinstance(item, DocumentUrl):
+                client = cached_async_http_client()
+                response = await client.get(item.url)
+                response.raise_for_status()
+                base64_encoded = base64.b64encode(response.content).decode('utf-8')
+                media_type = response.headers.get('content-type').split(';')[0]
+                file_data = f'data:{media_type};base64,{base64_encoded}'
+                file = File(
+                    file=FileFile(file_data=file_data, filename=f'filename.{item.format}'),
+                    type='file',
+                )
+                content.append(file)
             elif isinstance(item, VideoUrl):  # pragma: no cover
                 raise NotImplementedError('VideoUrl is not supported for OpenAI')
             else:
```
```diff
@@ -769,10 +773,11 @@ class OpenAIResponsesModel(Model):
             response = await client.get(item.url)
             response.raise_for_status()
             base64_encoded = base64.b64encode(response.content).decode('utf-8')
+            media_type = response.headers.get('content-type').split(';')[0]
             content.append(
                 responses.ResponseInputFileParam(
                     type='input_file',
-                    file_data=f'data:{…
+                    file_data=f'data:{media_type};base64,{base64_encoded}',
                     filename=f'filename.{item.format}',
                 )
             )
```
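Likewise, the previously commented-out `DocumentUrl` path now works in both `OpenAIModel` and `OpenAIResponsesModel`: the URL is fetched with the shared cached HTTP client, the body is base64-encoded into a data URI, and the media type comes from the response's `content-type` header. A sketch of the corresponding call (the URL is a placeholder):

```python
from pydantic_ai import Agent, DocumentUrl

agent = Agent('openai:gpt-4o')
result = agent.run_sync(
    [
        'What are the main points of this document?',
        DocumentUrl(url='https://example.com/whitepaper.pdf'),  # placeholder URL
    ]
)
print(result.output)
```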
{pydantic_ai_slim-0.1.3 → pydantic_ai_slim-0.1.5}/pyproject.toml +3 -3

```diff
@@ -56,9 +56,9 @@ dependencies = [
 # WARNING if you add optional groups, please update docs/install.md
 logfire = ["logfire>=3.11.0"]
 # Models
-openai = ["openai>=1.…
+openai = ["openai>=1.75.0"]
 cohere = ["cohere>=5.13.11; platform_system != 'Emscripten'"]
-vertexai = ["google-auth>=2.36.0", "requests>=2.32.…
+vertexai = ["google-auth>=2.36.0", "requests>=2.32.2"]
 anthropic = ["anthropic>=0.49.0"]
 groq = ["groq>=0.15.0"]
 mistral = ["mistralai>=1.2.5"]
@@ -69,7 +69,7 @@ tavily = ["tavily-python>=0.5.0"]
 # CLI
 cli = ["rich>=13", "prompt-toolkit>=3", "argcomplete>=3.5.0"]
 # MCP
-mcp = ["mcp>=1.…
+mcp = ["mcp>=1.6.0; python_version >= '3.10'"]
 # Evals
 evals = ["pydantic-evals=={{ version }}"]
```