pydantic-ai-slim 0.1.6__tar.gz → 0.1.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-ai-slim might be problematic.
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/PKG-INFO +3 -3
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_agent_graph.py +1 -31
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_pydantic.py +1 -1
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_utils.py +3 -5
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/agent.py +72 -19
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/gemini.py +1 -3
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/openai.py +4 -6
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/.gitignore +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/README.md +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/__init__.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/_json_schema.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pyproject.toml +0 -0
{pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.1.6
+Version: 0.1.7
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT

@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.1.6
+Requires-Dist: pydantic-graph==0.1.7
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic

@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.1.6; extra == 'evals'
+Requires-Dist: pydantic-evals==0.1.7; extra == 'evals'
 Provides-Extra: groq
 Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
{pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_agent_graph.py

@@ -2,14 +2,13 @@ from __future__ import annotations as _annotations
 
 import asyncio
 import dataclasses
-import json
 from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
 from contextlib import asynccontextmanager, contextmanager
 from contextvars import ContextVar
 from dataclasses import field
 from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, Union, cast
 
-from opentelemetry.trace import Span, Tracer
+from opentelemetry.trace import Tracer
 from typing_extensions import TypeGuard, TypeVar, assert_never
 
 from pydantic_graph import BaseNode, Graph, GraphRunContext

@@ -24,7 +23,6 @@ from . import (
     result,
     usage as _usage,
 )
-from .models.instrumented import InstrumentedModel
 from .result import OutputDataT, ToolOutput
 from .settings import ModelSettings, merge_model_settings
 from .tools import RunContext, Tool, ToolDefinition

@@ -95,7 +93,6 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
     function_tools: dict[str, Tool[DepsT]] = dataclasses.field(repr=False)
     mcp_servers: Sequence[MCPServer] = dataclasses.field(repr=False)
 
-    run_span: Span
     tracer: Tracer
 
 

@@ -498,39 +495,12 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         final_result: result.FinalResult[NodeRunEndT],
         tool_responses: list[_messages.ModelRequestPart],
     ) -> End[result.FinalResult[NodeRunEndT]]:
-        run_span = ctx.deps.run_span
-        usage = ctx.state.usage
         messages = ctx.state.message_history
 
         # For backwards compatibility, append a new ModelRequest using the tool returns and retries
         if tool_responses:
             messages.append(_messages.ModelRequest(parts=tool_responses))
 
-        run_span.set_attributes(
-            {
-                **usage.opentelemetry_attributes(),
-                'all_messages_events': json.dumps(
-                    [InstrumentedModel.event_to_dict(e) for e in InstrumentedModel.messages_to_otel_events(messages)]
-                ),
-                'final_result': final_result.output
-                if isinstance(final_result.output, str)
-                else json.dumps(InstrumentedModel.serialize_any(final_result.output)),
-            }
-        )
-        run_span.set_attributes(
-            {
-                'logfire.json_schema': json.dumps(
-                    {
-                        'type': 'object',
-                        'properties': {
-                            'all_messages_events': {'type': 'array'},
-                            'final_result': {'type': 'object'},
-                        },
-                    }
-                ),
-            }
-        )
-
         return End(final_result)
 
     async def _handle_text_response(
{pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_pydantic.py

@@ -58,7 +58,7 @@ def function_schema( # noqa: C901
     Returns:
         A `FunctionSchema` instance.
     """
-    config = ConfigDict(title=function.__name__)
+    config = ConfigDict(title=function.__name__, use_attribute_docstrings=True)
    config_wrapper = ConfigWrapper(config)
    gen_schema = _generate_schema.GenerateSchema(config_wrapper)
 
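Setting `use_attribute_docstrings=True` means that when pydantic-ai synthesizes a model from a tool function's parameters, docstrings written directly under attributes are picked up as field descriptions in the generated JSON schema. A minimal standalone sketch of the underlying Pydantic option (plain Pydantic, not pydantic-ai code):

from pydantic import BaseModel, ConfigDict


class Point(BaseModel):
    model_config = ConfigDict(use_attribute_docstrings=True)

    x: int
    """Horizontal position in pixels."""

    y: int
    """Vertical position in pixels."""


# the attribute docstrings become `description` entries in the JSON schema
print(Point.model_json_schema()['properties']['x']['description'])
# -> Horizontal position in pixels.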
{pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/_utils.py

@@ -11,6 +11,7 @@ from functools import partial
 from types import GenericAlias
 from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar, Union
 
+from anyio.to_thread import run_sync
 from pydantic import BaseModel
 from pydantic.json_schema import JsonSchemaValue
 from typing_extensions import ParamSpec, TypeAlias, TypeGuard, is_typeddict

@@ -31,11 +32,8 @@ _R = TypeVar('_R')
 
 
 async def run_in_executor(func: Callable[_P, _R], *args: _P.args, **kwargs: _P.kwargs) -> _R:
-    if kwargs:
-        # noinspection PyTypeChecker
-        return await asyncio.get_running_loop().run_in_executor(None, partial(func, *args, **kwargs))
-    else:
-        return await asyncio.get_running_loop().run_in_executor(None, func, *args)  # type: ignore
+    wrapped_func = partial(func, *args, **kwargs)
+    return await run_sync(wrapped_func)
 
 
 def is_model_like(type_: Any) -> bool:
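run_in_executor now offloads blocking calls through AnyIO instead of asyncio's default executor, so it behaves the same under asyncio and Trio event loops. A standalone sketch of the same pattern, using a hypothetical blocking function:

from functools import partial

import anyio
from anyio.to_thread import run_sync


def slow_lookup(key: str, *, retries: int = 1) -> str:
    # stands in for any blocking call (file I/O, a sync HTTP client, ...)
    return f'{key}:{retries}'


async def main() -> None:
    # run_sync only forwards positional arguments, so kwargs are bound with partial
    result = await run_sync(partial(slow_lookup, 'user_42', retries=3))
    print(result)  # user_42:3


anyio.run(main)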
{pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/agent.py

@@ -2,6 +2,7 @@ from __future__ import annotations as _annotations
 
 import dataclasses
 import inspect
+import json
 import warnings
 from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
 from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager, contextmanager

@@ -152,7 +153,10 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         model: models.Model | models.KnownModelName | str | None = None,
         *,
         output_type: type[OutputDataT] | ToolOutput[OutputDataT] = str,
-        instructions: str | None = None,
+        instructions: str
+        | _system_prompt.SystemPromptFunc[AgentDepsT]
+        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
+        | None = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,

@@ -175,7 +179,10 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         model: models.Model | models.KnownModelName | str | None = None,
         *,
         result_type: type[OutputDataT] = str,
-        instructions: str | None = None,
+        instructions: str
+        | _system_prompt.SystemPromptFunc[AgentDepsT]
+        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
+        | None = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,

@@ -197,7 +204,10 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         *,
         # TODO change this back to `output_type: type[OutputDataT] | ToolOutput[OutputDataT] = str,` when we remove the overloads
         output_type: Any = str,
-        instructions: str | None = None,
+        instructions: str
+        | _system_prompt.SystemPromptFunc[AgentDepsT]
+        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
+        | None = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,

@@ -296,10 +306,16 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         )
         self._output_validators = []
 
-        self.
-
-        )
-
+        self._instructions = ''
+        self._instructions_functions = []
+        if isinstance(instructions, (str, Callable)):
+            instructions = [instructions]
+        for instruction in instructions or []:
+            if isinstance(instruction, str):
+                self._instructions += instruction + '\n'
+            else:
+                self._instructions_functions.append(_system_prompt.SystemPromptRunner(instruction))
+        self._instructions = self._instructions.strip() or None
 
         self._system_prompts = (system_prompt,) if isinstance(system_prompt, str) else tuple(system_prompt)
         self._system_prompt_functions = []

@@ -585,9 +601,10 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         )
 
         # Build the initial state
+        usage = usage or _usage.Usage()
         state = _agent_graph.GraphAgentState(
             message_history=message_history[:] if message_history else [],
-            usage=usage or _usage.Usage(),
+            usage=usage,
             retries=0,
             run_step=0,
         )

@@ -625,8 +642,8 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
 
             instructions = self._instructions or ''
             for instructions_runner in self._instructions_functions:
-                instructions += await instructions_runner.run(run_context)
-            return instructions
+                instructions += '\n' + await instructions_runner.run(run_context)
+            return instructions.strip()
 
         graph_deps = _agent_graph.GraphAgentDeps[AgentDepsT, RunOutputDataT](
             user_deps=deps,

@@ -641,7 +658,6 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             output_validators=output_validators,
             function_tools=self._function_tools,
             mcp_servers=self._mcp_servers,
-            run_span=run_span,
             tracer=tracer,
             get_instructions=get_instructions,
         )

@@ -654,14 +670,51 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             system_prompt_dynamic_functions=self._system_prompt_dynamic_functions,
         )
 
-
-
-
-
-
-
-
-
+        try:
+            async with graph.iter(
+                start_node,
+                state=state,
+                deps=graph_deps,
+                span=use_span(run_span) if run_span.is_recording() else None,
+                infer_name=False,
+            ) as graph_run:
+                agent_run = AgentRun(graph_run)
+                yield agent_run
+                if (final_result := agent_run.result) is not None and run_span.is_recording():
+                    run_span.set_attribute(
+                        'final_result',
+                        (
+                            final_result.output
+                            if isinstance(final_result.output, str)
+                            else json.dumps(InstrumentedModel.serialize_any(final_result.output))
+                        ),
+                    )
+        finally:
+            try:
+                if run_span.is_recording():
+                    run_span.set_attributes(self._run_span_end_attributes(state, usage))
+            finally:
+                run_span.end()
+
+    def _run_span_end_attributes(self, state: _agent_graph.GraphAgentState, usage: _usage.Usage):
+        return {
+            **usage.opentelemetry_attributes(),
+            'all_messages_events': json.dumps(
+                [
+                    InstrumentedModel.event_to_dict(e)
+                    for e in InstrumentedModel.messages_to_otel_events(state.message_history)
+                ]
+            ),
+            'logfire.json_schema': json.dumps(
+                {
+                    'type': 'object',
+                    'properties': {
+                        'all_messages_events': {'type': 'array'},
+                        'final_result': {'type': 'object'},
+                    },
+                }
+            ),
+        }
 
     @overload
     def run_sync(
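The net effect of the agent.py changes: `instructions` now accepts the same shapes as `system_prompt`, meaning a string, a function of `RunContext`, or a sequence mixing both; string parts are joined at construction time, while function parts are re-evaluated on every run. A usage sketch (the model name and deps are illustrative):

from dataclasses import dataclass

from pydantic_ai import Agent, RunContext


@dataclass
class Deps:
    user_name: str


def personalize(ctx: RunContext[Deps]) -> str:
    # evaluated on each run, so it can depend on per-run deps
    return f'Address the user as {ctx.deps.user_name}.'


agent = Agent(
    'openai:gpt-4o',  # illustrative model name
    deps_type=Deps,
    instructions=['Reply concisely.', personalize],
)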
{pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/gemini.py

@@ -328,7 +328,7 @@ class GeminiModel(Model):
             content.append(
                 _GeminiInlineDataPart(inline_data={'data': base64_encoded, 'mime_type': item.media_type})
             )
-        elif isinstance(item, (AudioUrl, ImageUrl, DocumentUrl)):
+        elif isinstance(item, (AudioUrl, ImageUrl, DocumentUrl, VideoUrl)):
             client = cached_async_http_client()
             response = await client.get(item.url, follow_redirects=True)
             response.raise_for_status()

@@ -337,8 +337,6 @@ class GeminiModel(Model):
                 inline_data={'data': base64.b64encode(response.content).decode('utf-8'), 'mime_type': mime_type}
             )
             content.append(inline_data)
-        elif isinstance(item, VideoUrl):  # pragma: no cover
-            raise NotImplementedError('VideoUrl is not supported for Gemini.')
         else:
             assert_never(item)
     return content
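With VideoUrl folded into the generic URL branch, Gemini now downloads video content and inlines it the same way as audio, images, and documents, rather than raising NotImplementedError. A sketch of how this surfaces to callers (model name and URL are placeholders):

from pydantic_ai import Agent
from pydantic_ai.messages import VideoUrl

agent = Agent('google-gla:gemini-1.5-flash')  # placeholder model name

result = agent.run_sync(
    [
        'Describe what happens in this clip.',
        VideoUrl(url='https://example.com/clip.mp4'),  # placeholder URL
    ]
)
print(result.output)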
{pydantic_ai_slim-0.1.6 → pydantic_ai_slim-0.1.7}/pydantic_ai/models/openai.py

@@ -439,12 +439,13 @@ class OpenAIModel(Model):
                 )
             else:  # pragma: no cover
                 raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
-        elif isinstance(item, AudioUrl):
+        elif isinstance(item, AudioUrl):
             client = cached_async_http_client()
             response = await client.get(item.url)
             response.raise_for_status()
             base64_encoded = base64.b64encode(response.content).decode('utf-8')
-
+            audio_format: Any = response.headers['content-type'].removeprefix('audio/')
+            audio = InputAudio(data=base64_encoded, format=audio_format)
             content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
         elif isinstance(item, DocumentUrl):
             client = cached_async_http_client()

@@ -453,10 +454,7 @@ class OpenAIModel(Model):
             base64_encoded = base64.b64encode(response.content).decode('utf-8')
             media_type = response.headers.get('content-type').split(';')[0]
             file_data = f'data:{media_type};base64,{base64_encoded}'
-            file = File(
-                file=FileFile(file_data=file_data, filename=f'filename.{item.format}'),
-                type='file',
-            )
+            file = File(file=FileFile(file_data=file_data, filename=f'filename.{item.format}'), type='file')
             content.append(file)
         elif isinstance(item, VideoUrl):  # pragma: no cover
             raise NotImplementedError('VideoUrl is not supported for OpenAI')
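The OpenAI mapping now derives the input-audio format from the download's Content-Type header: 'audio/mp3' becomes 'mp3', 'audio/wav' becomes 'wav', and so on. A standalone sketch of the same fetch-and-derive step (the URL is a placeholder):

import base64

import anyio
import httpx


async def fetch_input_audio(url: str) -> tuple[str, str]:
    """Mirror the new openai.py step: base64-encode the body and derive the
    audio format from the Content-Type header, e.g. 'audio/mp3' -> 'mp3'."""
    async with httpx.AsyncClient() as client:
        response = await client.get(url)
        response.raise_for_status()
    data = base64.b64encode(response.content).decode('utf-8')
    audio_format = response.headers['content-type'].removeprefix('audio/')
    return data, audio_format


if __name__ == '__main__':
    _, fmt = anyio.run(fetch_input_audio, 'https://example.com/sample.mp3')  # placeholder URL
    print(fmt)  # mp3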