pydantic-ai-slim 0.0.47__tar.gz → 0.0.49__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic.

Files changed (52)
  1. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_cli.py +82 -59
  3. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_result.py +7 -3
  4. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/__init__.py +2 -0
  5. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/anthropic.py +5 -41
  6. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/cohere.py +1 -1
  7. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/gemini.py +1 -0
  8. pydantic_ai_slim-0.0.49/pydantic_ai/models/openai.py +908 -0
  9. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/tools.py +2 -2
  10. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pyproject.toml +3 -3
  11. pydantic_ai_slim-0.0.47/pydantic_ai/models/openai.py +0 -460
  12. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/.gitignore +0 -0
  13. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/README.md +0 -0
  14. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/__init__.py +0 -0
  15. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/__main__.py +0 -0
  16. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_agent_graph.py +0 -0
  17. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_griffe.py +0 -0
  18. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_parts_manager.py +0 -0
  19. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_pydantic.py +0 -0
  20. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_system_prompt.py +0 -0
  21. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_utils.py +0 -0
  22. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/agent.py +0 -0
  23. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/common_tools/__init__.py +0 -0
  24. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  25. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/common_tools/tavily.py +0 -0
  26. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/exceptions.py +0 -0
  27. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/format_as_xml.py +0 -0
  28. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/mcp.py +0 -0
  29. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/messages.py +0 -0
  30. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/bedrock.py +0 -0
  31. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/fallback.py +0 -0
  32. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/function.py +0 -0
  33. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/groq.py +0 -0
  34. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/instrumented.py +0 -0
  35. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/mistral.py +0 -0
  36. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/test.py +0 -0
  37. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/wrapper.py +0 -0
  38. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/__init__.py +0 -0
  39. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/anthropic.py +0 -0
  40. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/azure.py +0 -0
  41. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/bedrock.py +0 -0
  42. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/cohere.py +0 -0
  43. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/deepseek.py +0 -0
  44. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/google_gla.py +0 -0
  45. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/google_vertex.py +0 -0
  46. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/groq.py +0 -0
  47. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/mistral.py +0 -0
  48. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/providers/openai.py +0 -0
  49. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/py.typed +0 -0
  50. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/result.py +0 -0
  51. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/settings.py +0 -0
  52. {pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/usage.py +0 -0
{pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.47
+Version: 0.0.49
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.0.47
+Requires-Dist: pydantic-graph==0.0.49
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.0.41; extra == 'evals'
+Requires-Dist: pydantic-evals==0.0.49; extra == 'evals'
 Provides-Extra: groq
 Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
{pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_cli.py
@@ -3,19 +3,20 @@ from __future__ import annotations as _annotations
 import argparse
 import asyncio
 import sys
+from asyncio import CancelledError
 from collections.abc import Sequence
 from contextlib import ExitStack
 from datetime import datetime, timezone
 from importlib.metadata import version
 from pathlib import Path
-from typing import cast
+from typing import Any, cast
 
 from typing_inspection.introspection import get_literal_values
 
 from pydantic_ai.agent import Agent
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.messages import ModelMessage, PartDeltaEvent, TextPartDelta
-from pydantic_ai.models import KnownModelName
+from pydantic_ai.models import KnownModelName, infer_model
 
 try:
     import argcomplete
@@ -47,7 +48,7 @@ class SimpleCodeBlock(CodeBlock):
     This avoids a background color which messes up copy-pasting and sets the language name as dim prefix and suffix.
     """
 
-    def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:  # pragma: no cover
+    def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
        code = str(self.text).rstrip()
        yield Text(self.lexer_name, style='dim')
        yield Syntax(code, self.lexer_name, theme=self.theme, background_color='default', word_wrap=True)
@@ -57,7 +58,7 @@ class SimpleCodeBlock(CodeBlock):
 class LeftHeading(Heading):
     """Customised headings in markdown to stop centering and prepend markdown style hashes."""
 
-    def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:  # pragma: no cover
+    def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
        # note we use `Style(bold=True)` not `self.style_name` here to disable underlining which is ugly IMHO
        yield Text(f'{"#" * int(self.tag[1:])} {self.text.plain}', style=Style(bold=True))
 
@@ -68,7 +69,21 @@ Markdown.elements.update(
 )
 
 
-def cli(args_list: Sequence[str] | None = None) -> int:  # noqa: C901  # pragma: no cover
+cli_agent = Agent()
+
+
+@cli_agent.system_prompt
+def cli_system_prompt() -> str:
+    now_utc = datetime.now(timezone.utc)
+    tzinfo = now_utc.astimezone().tzinfo
+    tzname = tzinfo.tzname(now_utc) if tzinfo else ''
+    return f"""\
+Help the user by responding to their request, the output should be concise and always written in markdown.
+The current date and time is {datetime.now()} {tzname}.
+The user is running {sys.platform}."""
+
+
+def cli(args_list: Sequence[str] | None = None) -> int:
     parser = argparse.ArgumentParser(
         prog='pai',
         description=f"""\
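
Note: the CLI agent is now a module-level singleton with a decorator-registered system prompt, rather than being built inside cli(). A minimal test sketch of what this enables (assuming the private pydantic_ai._cli module path and TestModel remain importable):

    from pydantic_ai._cli import cli_agent
    from pydantic_ai.models.test import TestModel

    # swap in a stub model so CLI behaviour can be exercised without an API key
    with cli_agent.override(model=TestModel()):
        result = cli_agent.run_sync('hello')
        print(result.data)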
@@ -124,18 +139,10 @@ Special prompt:
            console.print(f' {model}', highlight=False)
        return 0
 
-    now_utc = datetime.now(timezone.utc)
-    tzname = now_utc.astimezone().tzinfo.tzname(now_utc)  # type: ignore
    try:
-        agent = Agent(
-            model=args.model,
-            system_prompt=f"""\
-Help the user by responding to their request, the output should be concise and always written in markdown.
-The current date and time is {datetime.now()} {tzname}.
-The user is running {sys.platform}.""",
-        )
-    except UserError:
-        console.print(f'[red]Invalid model "{args.model}"[/red]')
+        cli_agent.model = infer_model(args.model)
+    except UserError as e:
+        console.print(f'Error initializing [magenta]{args.model}[/magenta]:\n[red]{e}[/red]')
        return 1
 
    stream = not args.no_stream
@@ -148,67 +155,44 @@ Special prompt:
 
    if prompt := cast(str, args.prompt):
        try:
-            asyncio.run(ask_agent(agent, prompt, stream, console, code_theme))
+            asyncio.run(ask_agent(cli_agent, prompt, stream, console, code_theme))
        except KeyboardInterrupt:
            pass
        return 0
 
    history = Path.home() / '.pai-prompt-history.txt'
-    session = PromptSession(history=FileHistory(str(history)))  # type: ignore
+    # doing this instead of `PromptSession[Any](history=` allows mocking of PromptSession in tests
+    session: PromptSession[Any] = PromptSession(history=FileHistory(str(history)))
+    try:
+        return asyncio.run(run_chat(session, stream, cli_agent, console, code_theme))
+    except KeyboardInterrupt:  # pragma: no cover
+        return 0
+
+
+async def run_chat(session: PromptSession[Any], stream: bool, agent: Agent, console: Console, code_theme: str) -> int:
    multiline = False
    messages: list[ModelMessage] = []
 
    while True:
        try:
            auto_suggest = CustomAutoSuggest(['/markdown', '/multiline', '/exit'])
-            text = cast(str, session.prompt('pai ➤ ', auto_suggest=auto_suggest, multiline=multiline))
-        except (KeyboardInterrupt, EOFError):
+            text = await session.prompt_async('pai ➤ ', auto_suggest=auto_suggest, multiline=multiline)
+        except (KeyboardInterrupt, EOFError):  # pragma: no cover
            return 0
 
        if not text.strip():
            continue
 
-        ident_prompt = text.lower().strip(' ').replace(' ', '-').lstrip(' ')
+        ident_prompt = text.lower().strip().replace(' ', '-')
        if ident_prompt.startswith('/'):
-            if ident_prompt == '/markdown':
-                try:
-                    parts = messages[-1].parts
-                except IndexError:
-                    console.print('[dim]No markdown output available.[/dim]')
-                    continue
-                console.print('[dim]Markdown output of last question:[/dim]\n')
-                for part in parts:
-                    if part.part_kind == 'text':
-                        console.print(
-                            Syntax(
-                                part.content,
-                                lexer='markdown',
-                                theme=code_theme,
-                                word_wrap=True,
-                                background_color='default',
-                            )
-                        )
-
-            elif ident_prompt == '/multiline':
-                multiline = not multiline
-                if multiline:
-                    console.print(
-                        'Enabling multiline mode. '
-                        '[dim]Press [Meta+Enter] or [Esc] followed by [Enter] to accept input.[/dim]'
-                    )
-                else:
-                    console.print('Disabling multiline mode.')
-            elif ident_prompt == '/exit':
-                console.print('[dim]Exiting…[/dim]')
-                return 0
-            else:
-                console.print(f'[red]Unknown command[/red] [magenta]`{ident_prompt}`[/magenta]')
+            exit_value, multiline = handle_slash_command(ident_prompt, messages, multiline, console, code_theme)
+            if exit_value is not None:
+                return exit_value
        else:
            try:
-                messages = asyncio.run(ask_agent(agent, text, stream, console, code_theme, messages))
-            except KeyboardInterrupt:
+                messages = await ask_agent(agent, text, stream, console, code_theme, messages)
+            except CancelledError:  # pragma: no cover
                console.print('[dim]Interrupted[/dim]')
-                messages = []
 
 
 async def ask_agent(
@@ -218,7 +202,7 @@ async def ask_agent(
    console: Console,
    code_theme: str,
    messages: list[ModelMessage] | None = None,
-) -> list[ModelMessage]:  # pragma: no cover
+) -> list[ModelMessage]:
    status = Status('[dim]Working on it…[/dim]', console=console)
 
    if not stream:
@@ -248,7 +232,7 @@
 
 
 class CustomAutoSuggest(AutoSuggestFromHistory):
-    def __init__(self, special_suggestions: list[str] | None = None):  # pragma: no cover
+    def __init__(self, special_suggestions: list[str] | None = None):
        super().__init__()
        self.special_suggestions = special_suggestions or []
 
@@ -264,5 +248,44 @@ class CustomAutoSuggest(AutoSuggestFromHistory):
        return suggestion
 
 
+def handle_slash_command(
+    ident_prompt: str, messages: list[ModelMessage], multiline: bool, console: Console, code_theme: str
+) -> tuple[int | None, bool]:
+    if ident_prompt == '/markdown':
+        try:
+            parts = messages[-1].parts
+        except IndexError:
+            console.print('[dim]No markdown output available.[/dim]')
+        else:
+            console.print('[dim]Markdown output of last question:[/dim]\n')
+            for part in parts:
+                if part.part_kind == 'text':
+                    console.print(
+                        Syntax(
+                            part.content,
+                            lexer='markdown',
+                            theme=code_theme,
+                            word_wrap=True,
+                            background_color='default',
+                        )
+                    )
+
+    elif ident_prompt == '/multiline':
+        multiline = not multiline
+        if multiline:
+            console.print(
+                'Enabling multiline mode. [dim]Press [Meta+Enter] or [Esc] followed by [Enter] to accept input.[/dim]'
+            )
+        else:
+            console.print('Disabling multiline mode.')
+        return None, multiline
+    elif ident_prompt == '/exit':
+        console.print('[dim]Exiting…[/dim]')
+        return 0, multiline
+    else:
+        console.print(f'[red]Unknown command[/red] [magenta]`{ident_prompt}`[/magenta]')
+    return None, multiline
+
+
 def app():  # pragma: no cover
    sys.exit(cli())
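
Note: extracting handle_slash_command from the REPL loop makes command handling testable in isolation. A rough sketch of its (exit_code, multiline) contract, based on the branches above (assumes the private module path stays importable):

    from rich.console import Console
    from pydantic_ai._cli import handle_slash_command

    console = Console()
    # '/multiline' toggles the flag; an exit code of None means "keep looping"
    assert handle_slash_command('/multiline', [], False, console, 'monokai') == (None, True)
    # '/exit' tells the caller to return exit code 0
    assert handle_slash_command('/exit', [], False, console, 'monokai') == (0, False)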
{pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/_result.py
@@ -13,7 +13,7 @@ from typing_inspection.introspection import is_union_origin
 from . import _utils, messages as _messages
 from .exceptions import ModelRetry
 from .result import ResultDataT, ResultDataT_inv, ResultValidatorFunc
-from .tools import AgentDepsT, RunContext, ToolDefinition
+from .tools import AgentDepsT, GenerateToolJsonSchema, RunContext, ToolDefinition
 
 T = TypeVar('T')
 """An invariant TypeVar."""
@@ -159,7 +159,9 @@ class ResultTool(Generic[ResultDataT]):
            self.type_adapter = TypeAdapter(response_type)
            outer_typed_dict_key: str | None = None
            # noinspection PyArgumentList
-            parameters_json_schema = _utils.check_object_json_schema(self.type_adapter.json_schema())
+            parameters_json_schema = _utils.check_object_json_schema(
+                self.type_adapter.json_schema(schema_generator=GenerateToolJsonSchema)
+            )
        else:
            response_data_typed_dict = TypedDict(  # noqa: UP013
                'response_data_typed_dict',
@@ -168,7 +170,9 @@ class ResultTool(Generic[ResultDataT]):
            self.type_adapter = TypeAdapter(response_data_typed_dict)
            outer_typed_dict_key = 'response'
            # noinspection PyArgumentList
-            parameters_json_schema = _utils.check_object_json_schema(self.type_adapter.json_schema())
+            parameters_json_schema = _utils.check_object_json_schema(
+                self.type_adapter.json_schema(schema_generator=GenerateToolJsonSchema)
+            )
            # including `response_data_typed_dict` as a title here doesn't add anything and could confuse the LLM
            parameters_json_schema.pop('title')
 
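Note: both branches now pass pydantic-ai's GenerateToolJsonSchema to pydantic's schema generation instead of the default generator. A minimal sketch of the underlying pydantic API, using a hypothetical Answer type:

    from typing_extensions import TypedDict

    from pydantic import TypeAdapter
    from pydantic_ai.tools import GenerateToolJsonSchema

    class Answer(TypedDict):
        text: str

    adapter = TypeAdapter(Answer)
    default_schema = adapter.json_schema()  # pydantic's default generator
    tool_schema = adapter.json_schema(schema_generator=GenerateToolJsonSchema)  # what ResultTool now uses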
{pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/__init__.py
@@ -106,6 +106,7 @@ KnownModelName = TypeAliasType(
        'google-gla:gemini-2.0-flash',
        'google-gla:gemini-2.0-flash-lite-preview-02-05',
        'google-gla:gemini-2.0-pro-exp-02-05',
+        'google-gla:gemini-2.5-pro-exp-03-25',
        'google-vertex:gemini-1.0-pro',
        'google-vertex:gemini-1.5-flash',
        'google-vertex:gemini-1.5-flash-8b',
@@ -116,6 +117,7 @@ KnownModelName = TypeAliasType(
        'google-vertex:gemini-2.0-flash',
        'google-vertex:gemini-2.0-flash-lite-preview-02-05',
        'google-vertex:gemini-2.0-pro-exp-02-05',
+        'google-vertex:gemini-2.5-pro-exp-03-25',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-0125',
        'gpt-3.5-turbo-0301',
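
Note: with the new entries in KnownModelName (and the matching Gemini literal further below), the model can be referenced by name; a minimal sketch:

    from pydantic_ai import Agent

    # the google-gla provider typically reads a GEMINI_API_KEY environment variable
    agent = Agent('google-gla:gemini-2.5-pro-exp-03-25')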
{pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/anthropic.py
@@ -1,6 +1,5 @@
 from __future__ import annotations as _annotations
 
-import base64
 import io
 from collections.abc import AsyncGenerator, AsyncIterable, AsyncIterator
 from contextlib import asynccontextmanager
@@ -9,7 +8,6 @@ from datetime import datetime, timezone
 from json import JSONDecodeError, loads as json_loads
 from typing import Any, Literal, Union, cast, overload
 
-from anthropic.types import DocumentBlockParam
 from typing_extensions import assert_never
 
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
@@ -40,6 +38,7 @@ try:
    from anthropic.types import (
        Base64PDFSourceParam,
        ContentBlock,
+        DocumentBlockParam,
        ImageBlockParam,
        Message as AnthropicMessage,
        MessageParam,
@@ -354,48 +353,13 @@ class AnthropicModel(Model):
            else:
                raise RuntimeError('Only images and PDFs are supported for binary content')
        elif isinstance(item, ImageUrl):
-            try:
-                response = await cached_async_http_client().get(item.url)
-                response.raise_for_status()
-                yield ImageBlockParam(
-                    source={
-                        'data': io.BytesIO(response.content),
-                        'media_type': item.media_type,
-                        'type': 'base64',
-                    },
-                    type='image',
-                )
-            except ValueError:
-                # Download the file if can't find the mime type.
-                client = cached_async_http_client()
-                response = await client.get(item.url, follow_redirects=True)
-                response.raise_for_status()
-                base64_encoded = base64.b64encode(response.content).decode('utf-8')
-                if (mime_type := response.headers['Content-Type']) in (
-                    'image/jpeg',
-                    'image/png',
-                    'image/gif',
-                    'image/webp',
-                ):
-                    yield ImageBlockParam(
-                        source={'data': base64_encoded, 'media_type': mime_type, 'type': 'base64'},
-                        type='image',
-                    )
-                else:  # pragma: no cover
-                    raise RuntimeError(f'Unsupported image type: {mime_type}')
+            yield ImageBlockParam(source={'type': 'url', 'url': item.url}, type='image')
        elif isinstance(item, DocumentUrl):
-            response = await cached_async_http_client().get(item.url)
-            response.raise_for_status()
            if item.media_type == 'application/pdf':
-                yield DocumentBlockParam(
-                    source=Base64PDFSourceParam(
-                        data=io.BytesIO(response.content),
-                        media_type=item.media_type,
-                        type='base64',
-                    ),
-                    type='document',
-                )
+                yield DocumentBlockParam(source={'url': item.url, 'type': 'url'}, type='document')
            elif item.media_type == 'text/plain':
+                response = await cached_async_http_client().get(item.url)
+                response.raise_for_status()
                yield DocumentBlockParam(
                    source=PlainTextSourceParam(data=response.text, media_type=item.media_type, type='text'),
                    type='document',
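
Note: ImageUrl and DocumentUrl items are now forwarded to Anthropic as URL source blocks, so the API fetches the content itself instead of pydantic-ai downloading and base64-encoding it (text/plain documents are still downloaded). A hedged usage sketch with an illustrative URL:

    from pydantic_ai import Agent
    from pydantic_ai.messages import ImageUrl

    agent = Agent('anthropic:claude-3-5-sonnet-latest')
    # the URL is passed through as {'type': 'url', 'url': ...} rather than downloaded
    result = agent.run_sync(['Describe this image.', ImageUrl(url='https://example.com/cat.png')])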
{pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/cohere.py
@@ -5,7 +5,6 @@ from dataclasses import dataclass, field
 from itertools import chain
 from typing import Literal, Union, cast
 
-from cohere import TextAssistantMessageContentItem
 from typing_extensions import assert_never
 
 from .. import ModelHTTPError, result
@@ -38,6 +37,7 @@ try:
        ChatMessageV2,
        ChatResponse,
        SystemChatMessageV2,
+        TextAssistantMessageContentItem,
        ToolCallV2,
        ToolCallV2Function,
        ToolChatMessageV2,
{pydantic_ai_slim-0.0.47 → pydantic_ai_slim-0.0.49}/pydantic_ai/models/gemini.py
@@ -57,6 +57,7 @@ LatestGeminiModelNames = Literal[
    'gemini-2.0-flash',
    'gemini-2.0-flash-lite-preview-02-05',
    'gemini-2.0-pro-exp-02-05',
+    'gemini-2.5-pro-exp-03-25',
 ]
 """Latest Gemini models."""