pydantic-ai-slim 0.0.43__tar.gz → 0.0.44__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of pydantic-ai-slim has been flagged as potentially problematic.

Files changed (51)
  1. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/_cli.py +1 -1
  3. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/_griffe.py +29 -2
  4. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/agent.py +2 -2
  5. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/__init__.py +15 -14
  6. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/cohere.py +38 -4
  7. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/instrumented.py +14 -3
  8. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/openai.py +2 -8
  9. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/__init__.py +4 -0
  10. pydantic_ai_slim-0.0.44/pydantic_ai/providers/cohere.py +72 -0
  11. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pyproject.toml +3 -3
  12. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/.gitignore +0 -0
  13. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/README.md +0 -0
  14. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/__init__.py +0 -0
  15. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/_agent_graph.py +0 -0
  16. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/_parts_manager.py +0 -0
  17. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/_pydantic.py +0 -0
  18. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/_result.py +0 -0
  19. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/_system_prompt.py +0 -0
  20. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/_utils.py +0 -0
  21. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/common_tools/__init__.py +0 -0
  22. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  23. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/common_tools/tavily.py +0 -0
  24. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/exceptions.py +0 -0
  25. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/format_as_xml.py +0 -0
  26. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/mcp.py +0 -0
  27. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/messages.py +0 -0
  28. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/anthropic.py +0 -0
  29. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/bedrock.py +0 -0
  30. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/fallback.py +0 -0
  31. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/function.py +0 -0
  32. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/gemini.py +0 -0
  33. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/groq.py +0 -0
  34. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/mistral.py +0 -0
  35. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/test.py +0 -0
  36. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/vertexai.py +0 -0
  37. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/wrapper.py +0 -0
  38. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/anthropic.py +0 -0
  39. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/azure.py +0 -0
  40. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/bedrock.py +0 -0
  41. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/deepseek.py +0 -0
  42. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/google_gla.py +0 -0
  43. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/google_vertex.py +0 -0
  44. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/groq.py +0 -0
  45. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/mistral.py +0 -0
  46. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/openai.py +0 -0
  47. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/py.typed +0 -0
  48. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/result.py +0 -0
  49. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/settings.py +0 -0
  50. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/tools.py +0 -0
  51. {pydantic_ai_slim-0.0.43 → pydantic_ai_slim-0.0.44}/pydantic_ai/usage.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-slim
- Version: 0.0.43
+ Version: 0.0.44
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
  Author-email: Samuel Colvin <samuel@pydantic.dev>
  License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
  Requires-Dist: griffe>=1.3.2
  Requires-Dist: httpx>=0.27
  Requires-Dist: opentelemetry-api>=1.28.0
- Requires-Dist: pydantic-graph==0.0.43
+ Requires-Dist: pydantic-graph==0.0.44
  Requires-Dist: pydantic>=2.10
  Requires-Dist: typing-inspection>=0.4.0
  Provides-Extra: anthropic
@@ -53,7 +53,7 @@ Requires-Dist: mcp>=1.4.1; (python_version >= '3.10') and extra == 'mcp'
  Provides-Extra: mistral
  Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
  Provides-Extra: openai
- Requires-Dist: openai>=1.65.1; extra == 'openai'
+ Requires-Dist: openai>=1.67.0; extra == 'openai'
  Provides-Extra: tavily
  Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
  Provides-Extra: vertexai
pydantic_ai/_cli.py
@@ -37,7 +37,7 @@ except ImportError as _import_error:
  from pydantic_ai.agent import Agent
  from pydantic_ai.messages import ModelMessage, PartDeltaEvent, TextPartDelta

- __version__ = version('pydantic-ai')
+ __version__ = version('pydantic-ai-slim')


  class SimpleCodeBlock(CodeBlock):
pydantic_ai/_griffe.py
@@ -22,8 +22,16 @@ def doc_descriptions(
  ) -> tuple[str, dict[str, str]]:
      """Extract the function description and parameter descriptions from a function's docstring.

+     The function parses the docstring using the specified format (or infers it if 'auto')
+     and extracts both the main description and parameter descriptions. If a returns section
+     is present in the docstring, the main description will be formatted as XML.
+
      Returns:
-         A tuple of (main function description, parameter descriptions).
+         A tuple containing:
+         - str: Main description string, which may be either:
+           * Plain text if no returns section is present
+           * XML-formatted if returns section exists, including <summary> and <returns> tags
+         - dict[str, str]: Dictionary mapping parameter names to their descriptions
      """
      doc = func.__doc__
      if doc is None:
@@ -33,7 +41,14 @@ def doc_descriptions(
      parent = cast(GriffeObject, sig)

      docstring_style = _infer_docstring_style(doc) if docstring_format == 'auto' else docstring_format
-     docstring = Docstring(doc, lineno=1, parser=docstring_style, parent=parent)
+     docstring = Docstring(
+         doc,
+         lineno=1,
+         parser=docstring_style,
+         parent=parent,
+         # https://mkdocstrings.github.io/griffe/reference/docstrings/#google-options
+         parser_options={'returns_named_value': False, 'returns_multiple_items': False},
+     )
      with _disable_griffe_logging():
          sections = docstring.parse()

@@ -45,6 +60,18 @@ def doc_descriptions(
      if main := next((p for p in sections if p.kind == DocstringSectionKind.text), None):
          main_desc = main.value

+     if return_ := next((p for p in sections if p.kind == DocstringSectionKind.returns), None):
+         return_statement = return_.value[0]
+         return_desc = return_statement.description
+         return_type = return_statement.annotation
+         type_tag = f'<type>{return_type}</type>\n' if return_type else ''
+         return_xml = f'<returns>\n{type_tag}<description>{return_desc}</description>\n</returns>'
+
+         if main_desc:
+             main_desc = f'<summary>{main_desc}</summary>\n{return_xml}'
+         else:
+             main_desc = return_xml
+
      return main_desc, params

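For illustration, here is a minimal sketch of what the new returns handling yields. The `get_temperature` function and its docstring are invented for this example; the XML shape follows the f-strings in the hunk above.

def get_temperature(city: str) -> float:
    """Get the current temperature in a city.

    Args:
        city: Name of the city to look up.

    Returns:
        float: The temperature in degrees Celsius.
    """
    ...

# With the new parser options, doc_descriptions() would now return a main
# description shaped roughly like:
#
#   <summary>Get the current temperature in a city.</summary>
#   <returns>
#   <type>float</type>
#   <description>The temperature in degrees Celsius.</description>
#   </returns>
#
# together with {'city': 'Name of the city to look up.'} as the parameter map.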
pydantic_ai/agent.py
@@ -13,7 +13,7 @@ from pydantic.json_schema import GenerateJsonSchema
  from typing_extensions import TypeGuard, TypeVar, deprecated

  from pydantic_graph import End, Graph, GraphRun, GraphRunContext
- from pydantic_graph._utils import get_event_loop
+ from pydantic_graph._utils import run_until_complete

  from . import (
      _agent_graph,
@@ -567,7 +567,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
          """
          if infer_name and self.name is None:
              self._infer_name(inspect.currentframe())
-         return get_event_loop().run_until_complete(
+         return run_until_complete(
              self.run(
                  user_prompt,
                  result_type=result_type,
pydantic_ai/models/__init__.py
@@ -12,7 +12,7 @@ from contextlib import asynccontextmanager, contextmanager
  from dataclasses import dataclass, field
  from datetime import datetime
  from functools import cache
- from typing import TYPE_CHECKING
+ from typing import TYPE_CHECKING, cast

  import httpx
  from typing_extensions import Literal
@@ -133,8 +133,6 @@ KnownModelName = Literal[
      'gpt-4-turbo-2024-04-09',
      'gpt-4-turbo-preview',
      'gpt-4-vision-preview',
-     'gpt-4.5-preview',
-     'gpt-4.5-preview-2025-02-27',
      'gpt-4o',
      'gpt-4o-2024-05-13',
      'gpt-4o-2024-08-06',
@@ -146,6 +144,10 @@ KnownModelName = Literal[
      'gpt-4o-mini-2024-07-18',
      'gpt-4o-mini-audio-preview',
      'gpt-4o-mini-audio-preview-2024-12-17',
+     'gpt-4o-mini-search-preview',
+     'gpt-4o-mini-search-preview-2025-03-11',
+     'gpt-4o-search-preview',
+     'gpt-4o-search-preview-2025-03-11',
      'groq:gemma2-9b-it',
      'groq:llama-3.1-8b-instant',
      'groq:llama-3.2-11b-vision-preview',
@@ -189,8 +191,6 @@ KnownModelName = Literal[
      'openai:gpt-4-turbo-2024-04-09',
      'openai:gpt-4-turbo-preview',
      'openai:gpt-4-vision-preview',
-     'openai:gpt-4.5-preview',
-     'openai:gpt-4.5-preview-2025-02-27',
      'openai:gpt-4o',
      'openai:gpt-4o-2024-05-13',
      'openai:gpt-4o-2024-08-06',
@@ -202,6 +202,10 @@ KnownModelName = Literal[
      'openai:gpt-4o-mini-2024-07-18',
      'openai:gpt-4o-mini-audio-preview',
      'openai:gpt-4o-mini-audio-preview-2024-12-17',
+     'openai:gpt-4o-mini-search-preview',
+     'openai:gpt-4o-mini-search-preview-2025-03-11',
+     'openai:gpt-4o-search-preview',
+     'openai:gpt-4o-search-preview-2025-03-11',
      'openai:o1',
      'openai:o1-2024-12-17',
      'openai:o1-mini',
@@ -379,6 +383,7 @@ def infer_model(model: Model | KnownModelName) -> Model:

      try:
          provider, model_name = model.split(':', maxsplit=1)
+         provider = cast(str, provider)
      except ValueError:
          model_name = model
          # TODO(Marcelo): We should deprecate this way.
@@ -397,8 +402,7 @@ def infer_model(model: Model | KnownModelName) -> Model:
      if provider == 'cohere':
          from .cohere import CohereModel

-         # TODO(Marcelo): Missing provider API.
-         return CohereModel(model_name)
+         return CohereModel(model_name, provider=provider)
      elif provider in ('deepseek', 'openai'):
          from .openai import OpenAIModel

@@ -410,22 +414,19 @@ def infer_model(model: Model | KnownModelName) -> Model:
      elif provider == 'groq':
          from .groq import GroqModel

-         # TODO(Marcelo): Missing provider API.
-         return GroqModel(model_name)
+         return GroqModel(model_name, provider=provider)
      elif provider == 'mistral':
          from .mistral import MistralModel

-         # TODO(Marcelo): Missing provider API.
-         return MistralModel(model_name)
+         return MistralModel(model_name, provider=provider)
      elif provider == 'anthropic':
          from .anthropic import AnthropicModel

-         # TODO(Marcelo): Missing provider API.
-         return AnthropicModel(model_name)
+         return AnthropicModel(model_name, provider=provider)
      elif provider == 'bedrock':
          from .bedrock import BedrockConverseModel

-         return BedrockConverseModel(model_name)
+         return BedrockConverseModel(model_name, provider=provider)
      else:
          raise UserError(f'Unknown model: {model}')

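A minimal usage sketch of the effect on `infer_model` (the model name is illustrative): the provider prefix is now forwarded to the model class instead of being dropped.

from pydantic_ai.models import infer_model

# Now equivalent to CohereModel('command-r-plus', provider='cohere');
# the groq, mistral, anthropic, and bedrock prefixes behave the same way.
model = infer_model('cohere:command-r-plus')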
pydantic_ai/models/cohere.py
@@ -3,11 +3,11 @@ from __future__ import annotations as _annotations
  from collections.abc import Iterable
  from dataclasses import dataclass, field
  from itertools import chain
- from typing import Literal, Union, cast
+ from typing import Literal, Union, cast, overload

  from cohere import TextAssistantMessageContentItem
  from httpx import AsyncClient as AsyncHTTPClient
- from typing_extensions import assert_never
+ from typing_extensions import assert_never, deprecated

  from .. import ModelHTTPError, result
  from .._utils import guard_tool_call_id as _guard_tool_call_id
@@ -23,11 +23,13 @@ from ..messages import (
      ToolReturnPart,
      UserPromptPart,
  )
+ from ..providers import Provider, infer_provider
  from ..settings import ModelSettings
  from ..tools import ToolDefinition
  from . import (
      Model,
      ModelRequestParameters,
+     cached_async_http_client,
      check_allow_model_requests,
  )

@@ -100,10 +102,34 @@ class CohereModel(Model):
      _model_name: CohereModelName = field(repr=False)
      _system: str = field(default='cohere', repr=False)

+     @overload
      def __init__(
          self,
          model_name: CohereModelName,
          *,
+         provider: Literal['cohere'] | Provider[AsyncClientV2] = 'cohere',
+         api_key: None = None,
+         cohere_client: None = None,
+         http_client: None = None,
+     ) -> None: ...
+
+     @deprecated('Use the `provider` parameter instead of `api_key`, `cohere_client`, and `http_client`.')
+     @overload
+     def __init__(
+         self,
+         model_name: CohereModelName,
+         *,
+         provider: None = None,
+         api_key: str | None = None,
+         cohere_client: AsyncClientV2 | None = None,
+         http_client: AsyncHTTPClient | None = None,
+     ) -> None: ...
+
+     def __init__(
+         self,
+         model_name: CohereModelName,
+         *,
+         provider: Literal['cohere'] | Provider[AsyncClientV2] | None = None,
          api_key: str | None = None,
          cohere_client: AsyncClientV2 | None = None,
          http_client: AsyncHTTPClient | None = None,
@@ -113,6 +139,9 @@ class CohereModel(Model):
          Args:
              model_name: The name of the Cohere model to use. List of model names
                  available [here](https://docs.cohere.com/docs/models#command).
+             provider: The provider to use for authentication and API access. Can be either the string
+                 'cohere' or an instance of `Provider[AsyncClientV2]`. If not provided, a new provider will be
+                 created using the other parameters.
              api_key: The API key to use for authentication, if not provided, the
                  `CO_API_KEY` environment variable will be used if available.
              cohere_client: An existing Cohere async client to use. If provided,
@@ -120,12 +149,17 @@ class CohereModel(Model):
              http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
          """
          self._model_name: CohereModelName = model_name
-         if cohere_client is not None:
+
+         if provider is not None:
+             if isinstance(provider, str):
+                 provider = infer_provider(provider)
+             self.client = provider.client
+         elif cohere_client is not None:
              assert http_client is None, 'Cannot provide both `cohere_client` and `http_client`'
              assert api_key is None, 'Cannot provide both `cohere_client` and `api_key`'
              self.client = cohere_client
          else:
-             self.client = AsyncClientV2(api_key=api_key, httpx_client=http_client)
+             self.client = AsyncClientV2(api_key=api_key, httpx_client=http_client or cached_async_http_client())

      @property
      def base_url(self) -> str:
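A short, hedged sketch of the new `provider` parameter in use (the API key is a placeholder):

from pydantic_ai.models.cohere import CohereModel
from pydantic_ai.providers.cohere import CohereProvider

# Preferred: route authentication through a Provider instance (or the string 'cohere').
model = CohereModel('command-r-plus', provider=CohereProvider(api_key='your-api-key'))

# The old keyword arguments still work but now select the @deprecated overload:
# model = CohereModel('command-r-plus', api_key='your-api-key')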
pydantic_ai/models/instrumented.py
@@ -118,7 +118,7 @@ class InstrumentedModel(WrapperModel):
          model_settings: ModelSettings | None,
          model_request_parameters: ModelRequestParameters,
      ) -> tuple[ModelResponse, Usage]:
-         with self._instrument(messages, model_settings) as finish:
+         with self._instrument(messages, model_settings, model_request_parameters) as finish:
              response, usage = await super().request(messages, model_settings, model_request_parameters)
              finish(response, usage)
              return response, usage
@@ -130,7 +130,7 @@ class InstrumentedModel(WrapperModel):
          model_settings: ModelSettings | None,
          model_request_parameters: ModelRequestParameters,
      ) -> AsyncIterator[StreamedResponse]:
-         with self._instrument(messages, model_settings) as finish:
+         with self._instrument(messages, model_settings, model_request_parameters) as finish:
              response_stream: StreamedResponse | None = None
              try:
                  async with super().request_stream(
@@ -146,6 +146,7 @@ class InstrumentedModel(WrapperModel):
          self,
          messages: list[ModelMessage],
          model_settings: ModelSettings | None,
+         model_request_parameters: ModelRequestParameters,
      ) -> Iterator[Callable[[ModelResponse, Usage], None]]:
          operation = 'chat'
          span_name = f'{operation} {self.model_name}'
@@ -155,6 +156,13 @@ class InstrumentedModel(WrapperModel):
          attributes: dict[str, AttributeValue] = {
              'gen_ai.operation.name': operation,
              **self.model_attributes(self.wrapped),
+             'model_request_parameters': json.dumps(InstrumentedModel.serialize_any(model_request_parameters)),
+             'logfire.json_schema': json.dumps(
+                 {
+                     'type': 'object',
+                     'properties': {'model_request_parameters': {'type': 'object'}},
+                 }
+             ),
          }

          if model_settings:
@@ -207,7 +215,10 @@ class InstrumentedModel(WrapperModel):
              'logfire.json_schema': json.dumps(
                  {
                      'type': 'object',
-                     'properties': {attr_name: {'type': 'array'}},
+                     'properties': {
+                         attr_name: {'type': 'array'},
+                         'model_request_parameters': {'type': 'object'},
+                     },
                  }
              ),
          }
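Roughly, spans emitted by `InstrumentedModel` now carry the serialized request parameters alongside the existing attributes; the values below are abbreviated and illustrative, not taken from a real trace.

# attributes = {
#     'gen_ai.operation.name': 'chat',
#     ...,
#     'model_request_parameters': '{"function_tools": [...], "allow_text_result": true, ...}',
#     'logfire.json_schema': '{"type": "object", "properties": {"model_request_parameters": {"type": "object"}}}',
# }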
pydantic_ai/models/openai.py
@@ -99,7 +99,7 @@ class OpenAIModel(Model):
      system_prompt_role: OpenAISystemPromptRole | None = field(default=None)

      _model_name: OpenAIModelName = field(repr=False)
-     _system: str = field(repr=False)
+     _system: str = field(default='openai', repr=False)

      @overload
      def __init__(
@@ -108,7 +108,6 @@ class OpenAIModel(Model):
          *,
          provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
          system_prompt_role: OpenAISystemPromptRole | None = None,
-         system: str = 'openai',
      ) -> None: ...

      @deprecated('Use the `provider` parameter instead of `base_url`, `api_key`, `openai_client` and `http_client`.')
@@ -123,7 +122,6 @@ class OpenAIModel(Model):
          openai_client: AsyncOpenAI | None = None,
          http_client: AsyncHTTPClient | None = None,
          system_prompt_role: OpenAISystemPromptRole | None = None,
-         system: str = 'openai',
      ) -> None: ...

      def __init__(
@@ -136,7 +134,6 @@ class OpenAIModel(Model):
          openai_client: AsyncOpenAI | None = None,
          http_client: AsyncHTTPClient | None = None,
          system_prompt_role: OpenAISystemPromptRole | None = None,
-         system: str = 'openai',
      ):
          """Initialize an OpenAI model.

@@ -155,8 +152,6 @@ class OpenAIModel(Model):
              http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
              system_prompt_role: The role to use for the system prompt message. If not provided, defaults to `'system'`.
                  In the future, this may be inferred from the model name.
-             system: The model provider used, defaults to `openai`. This is for observability purposes, you must
-                 customize the `base_url` and `api_key` to use a different provider.
          """
          self._model_name = model_name

@@ -185,7 +180,6 @@ class OpenAIModel(Model):
          else:
              self.client = AsyncOpenAI(base_url=base_url, api_key=api_key, http_client=cached_async_http_client())
          self.system_prompt_role = system_prompt_role
-         self._system = system

      @property
      def base_url(self) -> str:
@@ -279,7 +273,7 @@ class OpenAIModel(Model):
              tool_choice=tool_choice or NOT_GIVEN,
              stream=stream,
              stream_options={'include_usage': True} if stream else NOT_GIVEN,
-             max_tokens=model_settings.get('max_tokens', NOT_GIVEN),
+             max_completion_tokens=model_settings.get('max_tokens', NOT_GIVEN),
              temperature=model_settings.get('temperature', NOT_GIVEN),
              top_p=model_settings.get('top_p', NOT_GIVEN),
              timeout=model_settings.get('timeout', NOT_GIVEN),
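The `max_tokens` to `max_completion_tokens` switch is transparent to callers: `ModelSettings` keeps the `max_tokens` key, and only the field sent to OpenAI changes (OpenAI deprecated `max_tokens` on Chat Completions in favor of `max_completion_tokens`). A minimal sketch of unchanged caller code:

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')
# The caller-side setting is unchanged; it is now transmitted as
# max_completion_tokens in the Chat Completions request.
result = agent.run_sync('What is the capital of France?', model_settings={'max_tokens': 100})
print(result.data)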
pydantic_ai/providers/__init__.py
@@ -77,5 +77,9 @@ def infer_provider(provider: str) -> Provider[Any]:
          from .mistral import MistralProvider

          return MistralProvider()
+     elif provider == 'cohere':
+         from .cohere import CohereProvider
+
+         return CohereProvider()
      else:  # pragma: no cover
          raise ValueError(f'Unknown provider: {provider}')
pydantic_ai/providers/cohere.py (new file)
@@ -0,0 +1,72 @@
+ from __future__ import annotations as _annotations
+
+ import os
+
+ from httpx import AsyncClient as AsyncHTTPClient
+
+ from pydantic_ai.models import cached_async_http_client
+
+ try:
+     from cohere import AsyncClientV2
+ except ImportError as _import_error:  # pragma: no cover
+     raise ImportError(
+         'Please install the `cohere` package to use the Cohere provider, '
+         'you can use the `cohere` optional group — `pip install "pydantic-ai-slim[cohere]"`'
+     ) from _import_error
+
+
+ from . import Provider
+
+
+ class CohereProvider(Provider[AsyncClientV2]):
+     """Provider for Cohere API."""
+
+     @property
+     def name(self) -> str:
+         return 'cohere'
+
+     @property
+     def base_url(self) -> str:
+         client_wrapper = self.client._client_wrapper  # type: ignore
+         return str(client_wrapper.get_base_url())
+
+     @property
+     def client(self) -> AsyncClientV2:
+         return self._client
+
+     def __init__(
+         self,
+         *,
+         api_key: str | None = None,
+         cohere_client: AsyncClientV2 | None = None,
+         http_client: AsyncHTTPClient | None = None,
+     ) -> None:
+         """Create a new Cohere provider.
+
+         Args:
+             api_key: The API key to use for authentication, if not provided, the `CO_API_KEY` environment variable
+                 will be used if available.
+             cohere_client: An existing
+                 [AsyncClientV2](https://github.com/cohere-ai/cohere-python)
+                 client to use. If provided, `api_key` and `http_client` must be `None`.
+             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
+         """
+         if cohere_client is not None:
+             assert http_client is None, 'Cannot provide both `cohere_client` and `http_client`'
+             assert api_key is None, 'Cannot provide both `cohere_client` and `api_key`'
+             self._client = cohere_client
+         else:
+             api_key = api_key or os.environ.get('CO_API_KEY')
+             if api_key is None:
+                 raise ValueError(
+                     'Set the `CO_API_KEY` environment variable or pass it via `CohereProvider(api_key=...)`'
+                     'to use the Cohere provider.'
+                 )
+
+             base_url = os.environ.get('CO_BASE_URL')
+             if http_client is not None:
+                 self._client = AsyncClientV2(api_key=api_key, httpx_client=http_client, base_url=base_url)
+             else:
+                 self._client = AsyncClientV2(
+                     api_key=api_key, httpx_client=cached_async_http_client(), base_url=base_url
+                 )
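A short construction sketch for the new provider (the key and timeout values are placeholders):

import httpx

from pydantic_ai.providers.cohere import CohereProvider

# Reads CO_API_KEY (and the optional CO_BASE_URL) from the environment:
provider = CohereProvider()

# Or configure explicitly, including a custom HTTP client:
provider = CohereProvider(api_key='your-api-key', http_client=httpx.AsyncClient(timeout=30))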
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "pydantic-ai-slim"
- version = "0.0.43"
+ version = "0.0.44"
  description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
  authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
  license = "MIT"
@@ -36,7 +36,7 @@ dependencies = [
      "griffe>=1.3.2",
      "httpx>=0.27",
      "pydantic>=2.10",
-     "pydantic-graph==0.0.43",
+     "pydantic-graph==0.0.44",
      "exceptiongroup; python_version < '3.11'",
      "opentelemetry-api>=1.28.0",
      "typing-inspection>=0.4.0",
@@ -46,7 +46,7 @@ dependencies = [
  # WARNING if you add optional groups, please update docs/install.md
  logfire = ["logfire>=2.3"]
  # Models
- openai = ["openai>=1.65.1"]
+ openai = ["openai>=1.67.0"]
  cohere = ["cohere>=5.13.11"]
  vertexai = ["google-auth>=2.36.0", "requests>=2.32.3"]
  anthropic = ["anthropic>=0.49.0"]